/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2018 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>

#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <asm/page.h>
#include <linux/time.h>
#include <linux/mii.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/if_bridge.h>
#include <linux/rtc.h>
#include <linux/bpf.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <net/udp_tunnel.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/cache.h>
#include <linux/log2.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/cpu_rmap.h>
#include <linux/cpumask.h>
#include <net/pkt_cls.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
#include "bnxt_dcb.h"
#include "bnxt_xdp.h"
#include "bnxt_vfr.h"
#include "bnxt_tc.h"
#include "bnxt_devlink.h"
#include "bnxt_debugfs.h"

#define BNXT_TX_TIMEOUT		(5 * HZ)

static const char version[] =
	"Broadcom NetXtreme-C/E driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION "\n";

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Broadcom BCM573xx network driver");
MODULE_VERSION(DRV_MODULE_VERSION);

#define BNXT_RX_OFFSET		(NET_SKB_PAD + NET_IP_ALIGN)
#define BNXT_RX_DMA_OFFSET	NET_SKB_PAD
#define BNXT_RX_COPY_THRESH	256

#define BNXT_TX_PUSH_THRESH	164

enum board_idx {
	BCM57301,
	BCM57302,
	BCM57304,
	BCM57417_NPAR,
	BCM58700,
	BCM57311,
	BCM57312,
	BCM57402,
	BCM57404,
	BCM57406,
	BCM57402_NPAR,
	BCM57407,
	BCM57412,
	BCM57414,
	BCM57416,
	BCM57417,
	BCM57412_NPAR,
	BCM57314,
	BCM57417_SFP,
	BCM57416_SFP,
	BCM57404_NPAR,
	BCM57406_NPAR,
	BCM57407_SFP,
	BCM57407_NPAR,
	BCM57414_NPAR,
	BCM57416_NPAR,
	BCM57452,
	BCM57454,
	BCM5745x_NPAR,
	BCM57508,
	BCM58802,
	BCM58804,
	BCM58808,
	NETXTREME_E_VF,
	NETXTREME_C_VF,
	NETXTREME_S_VF,
};

/* indexed by enum above */
static const struct {
	char *name;
} board_info[] = {
	[BCM57301] = { "Broadcom BCM57301 NetXtreme-C 10Gb Ethernet" },
	[BCM57302] = { "Broadcom BCM57302 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57304] = { "Broadcom BCM57304 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_NPAR] = { "Broadcom BCM57417 NetXtreme-E Ethernet Partition" },
	[BCM58700] = { "Broadcom BCM58700 Nitro 1Gb/2.5Gb/10Gb Ethernet" },
	[BCM57311] = { "Broadcom BCM57311 NetXtreme-C 10Gb Ethernet" },
	[BCM57312] = { "Broadcom BCM57312 NetXtreme-C 10Gb/25Gb Ethernet" },
	[BCM57402] = { "Broadcom BCM57402 NetXtreme-E 10Gb Ethernet" },
	[BCM57404] = { "Broadcom BCM57404 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57406] = { "Broadcom BCM57406 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57402_NPAR] = { "Broadcom BCM57402 NetXtreme-E Ethernet Partition" },
	[BCM57407] = { "Broadcom BCM57407 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412] = { "Broadcom BCM57412 NetXtreme-E 10Gb Ethernet" },
	[BCM57414] = { "Broadcom BCM57414 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416] = { "Broadcom BCM57416 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57417] = { "Broadcom BCM57417 NetXtreme-E 10GBase-T Ethernet" },
	[BCM57412_NPAR] = { "Broadcom BCM57412 NetXtreme-E Ethernet Partition" },
	[BCM57314] = { "Broadcom BCM57314 NetXtreme-C 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57417_SFP] = { "Broadcom BCM57417 NetXtreme-E 10Gb/25Gb Ethernet" },
	[BCM57416_SFP] = { "Broadcom BCM57416 NetXtreme-E 10Gb Ethernet" },
	[BCM57404_NPAR] = { "Broadcom BCM57404 NetXtreme-E Ethernet Partition" },
	[BCM57406_NPAR] = { "Broadcom BCM57406 NetXtreme-E Ethernet Partition" },
	[BCM57407_SFP] = { "Broadcom BCM57407 NetXtreme-E 25Gb Ethernet" },
	[BCM57407_NPAR] = { "Broadcom BCM57407 NetXtreme-E Ethernet Partition" },
	[BCM57414_NPAR] = { "Broadcom BCM57414 NetXtreme-E Ethernet Partition" },
	[BCM57416_NPAR] = { "Broadcom BCM57416 NetXtreme-E Ethernet Partition" },
	[BCM57452] = { "Broadcom BCM57452 NetXtreme-E 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM57454] = { "Broadcom BCM57454 NetXtreme-E 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM5745x_NPAR] = { "Broadcom BCM5745x NetXtreme-E Ethernet Partition" },
	[BCM57508] = { "Broadcom BCM57508 NetXtreme-E 10Gb/25Gb/50Gb/100Gb/200Gb Ethernet" },
	[BCM58802] = { "Broadcom BCM58802 NetXtreme-S 10Gb/25Gb/40Gb/50Gb Ethernet" },
	[BCM58804] = { "Broadcom BCM58804 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[BCM58808] = { "Broadcom BCM58808 NetXtreme-S 10Gb/25Gb/40Gb/50Gb/100Gb Ethernet" },
	[NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" },
	[NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" },
	[NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" },
};

static const struct pci_device_id bnxt_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, 0x1604), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1605), .driver_data = BCM5745x_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x1614), .driver_data = BCM57454 },
	{ PCI_VDEVICE(BROADCOM, 0x16c0), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16c8), .driver_data = BCM57301 },
	{ PCI_VDEVICE(BROADCOM, 0x16c9), .driver_data = BCM57302 },
	{ PCI_VDEVICE(BROADCOM, 0x16ca), .driver_data = BCM57304 },
	{ PCI_VDEVICE(BROADCOM, 0x16cc), .driver_data = BCM57417_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16cd), .driver_data = BCM58700 },
	{ PCI_VDEVICE(BROADCOM, 0x16ce), .driver_data = BCM57311 },
	{ PCI_VDEVICE(BROADCOM, 0x16cf), .driver_data = BCM57312 },
	{ PCI_VDEVICE(BROADCOM, 0x16d0), .driver_data = BCM57402 },
	{ PCI_VDEVICE(BROADCOM, 0x16d1), .driver_data = BCM57404 },
	{ PCI_VDEVICE(BROADCOM, 0x16d2), .driver_data = BCM57406 },
	{ PCI_VDEVICE(BROADCOM, 0x16d4), .driver_data = BCM57402_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16d5), .driver_data = BCM57407 },
	{ PCI_VDEVICE(BROADCOM, 0x16d6), .driver_data = BCM57412 },
	{ PCI_VDEVICE(BROADCOM, 0x16d7), .driver_data = BCM57414 },
	{ PCI_VDEVICE(BROADCOM, 0x16d8), .driver_data = BCM57416 },
	{ PCI_VDEVICE(BROADCOM, 0x16d9), .driver_data = BCM57417 },
	{ PCI_VDEVICE(BROADCOM, 0x16de), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16df), .driver_data = BCM57314 },
	{ PCI_VDEVICE(BROADCOM, 0x16e2), .driver_data = BCM57417_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e3), .driver_data = BCM57416_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16e7), .driver_data = BCM57404_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e8), .driver_data = BCM57406_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16e9), .driver_data = BCM57407_SFP },
	{ PCI_VDEVICE(BROADCOM, 0x16ea), .driver_data = BCM57407_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16eb), .driver_data = BCM57412_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ec), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ed), .driver_data = BCM57414_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ee), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16ef), .driver_data = BCM57416_NPAR },
	{ PCI_VDEVICE(BROADCOM, 0x16f0), .driver_data = BCM58808 },
	{ PCI_VDEVICE(BROADCOM, 0x16f1), .driver_data = BCM57452 },
	{ PCI_VDEVICE(BROADCOM, 0x1750), .driver_data = BCM57508 },
	{ PCI_VDEVICE(BROADCOM, 0xd802), .driver_data = BCM58802 },
	{ PCI_VDEVICE(BROADCOM, 0xd804), .driver_data = BCM58804 },
#ifdef CONFIG_BNXT_SRIOV
	{ PCI_VDEVICE(BROADCOM, 0x1606), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x1609), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16c1), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16cb), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16d3), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF },
	{ PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF },
#endif
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnxt_pci_tbl);

static const u16 bnxt_vf_req_snif[] = {
	HWRM_FUNC_CFG,
	HWRM_FUNC_VF_CFG,
	HWRM_PORT_PHY_QCFG,
	HWRM_CFA_L2_FILTER_ALLOC,
};

static const u16 bnxt_async_events_arr[] = {
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD,
	ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED,
	ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE,
	ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE,
};

static struct workqueue_struct *bnxt_pf_wq;

static bool bnxt_vf_pciid(enum board_idx idx)
{
	return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF ||
		idx == NETXTREME_S_VF);
}

#define DB_CP_REARM_FLAGS	(DB_KEY_CP | DB_IDX_VALID)
#define DB_CP_FLAGS		(DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS)
#define DB_CP_IRQ_DIS_FLAGS	(DB_KEY_CP | DB_IRQ_DIS)

#define BNXT_CP_DB_IRQ_DIS(db)						\
	writel(DB_CP_IRQ_DIS_FLAGS, db)

#define BNXT_DB_CQ(db, idx)						\
	writel(DB_CP_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_P5(db, idx)						\
	writeq((db)->db_key64 | DBR_TYPE_NQ | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_CQ_ARM(db, idx)						\
	writel(DB_CP_REARM_FLAGS | RING_CMP(idx), (db)->doorbell)

#define BNXT_DB_NQ_ARM_P5(db, idx)					\
	writeq((db)->db_key64 | DBR_TYPE_NQ_ARM | RING_CMP(idx), (db)->doorbell)

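/* Doorbell helpers.  P5 chips (such as the BCM57508) use 64-bit
 * doorbells built from db_key64 and written with writeq(); older chips
 * use the 32-bit completion-ring format from the DB_CP_* flags above.
 */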
static void bnxt_db_nq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_P5(db, idx);
	else
		BNXT_DB_CQ(db, idx);
}

static void bnxt_db_nq_arm(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		BNXT_DB_NQ_ARM_P5(db, idx);
	else
		BNXT_DB_CQ_ARM(db, idx);
}

static void bnxt_db_cq(struct bnxt *bp, struct bnxt_db_info *db, u32 idx)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		writeq(db->db_key64 | DBR_TYPE_CQ_ARMALL | RING_CMP(idx),
		       db->doorbell);
	else
		BNXT_DB_CQ(db, idx);
}

const u16 bnxt_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

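/* If the skb carries METADATA_HW_PORT_MUX dst metadata (set on VF
 * representor TX), return the CFA action/port id to place in the TX BD;
 * 0 means no special CFA action.
 */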
static u16 bnxt_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

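/* Main transmit entry point.  When the TX ring is completely idle and
 * the packet fits under bp->tx_push_thresh, the BDs and packet data are
 * pushed inline through the doorbell BAR; otherwise the skb is
 * DMA-mapped and posted as a chain of long TX BDs.
 */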
static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	struct tx_bd *txbd;
	struct tx_bd_ext *txbd1;
	struct netdev_queue *txq;
	int i;
	dma_addr_t mapping;
	unsigned int length, pad = 0;
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	u16 prod, last_frag;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_tx_ring_info *txr;
	struct bnxt_sw_tx_bd *tx_buf;

	i = skb_get_queue_mapping(skb);
	if (unlikely(i >= bp->tx_nr_rings)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	txq = netdev_get_tx_queue(dev, i);
	txr = &bp->tx_ring[bp->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnxt_tx_avail(bp, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}

	length = skb->len;
	len = skb_headlen(skb);
	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd->tx_bd_opaque = prod;

	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnxt_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
		struct tx_push_buffer *tx_push_buf = txr->tx_push;
		struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
		struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
		void __iomem *db = txr->tx_db.doorbell;
		void *pdata = tx_push_buf->data;
		u64 *end;
		int j, push_len;

		/* Set COAL_NOW to be ready quickly for the next push */
		tx_push->tx_bd_len_flags_type =
			cpu_to_le32((length << TX_BD_LEN_SHIFT) |
				    TX_BD_TYPE_LONG_TX_BD |
				    TX_BD_FLAGS_LHINT_512_AND_SMALLER |
				    TX_BD_FLAGS_COAL_NOW |
				    TX_BD_FLAGS_PACKET_END |
				    (2 << TX_BD_FLAGS_BD_CNT_SHIFT));

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			tx_push1->tx_bd_hsize_lflags =
				cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		else
			tx_push1->tx_bd_hsize_lflags = 0;

		tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
		tx_push1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);

		end = pdata + length;
		end = PTR_ALIGN(end, 8) - 1;
		*end = 0;

		skb_copy_from_linear_data(skb, pdata, len);
		pdata += len;
		for (j = 0; j < last_frag; j++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[j];
			void *fptr;

			fptr = skb_frag_address_safe(frag);
			if (!fptr)
				goto normal_tx;

			memcpy(pdata, fptr, skb_frag_size(frag));
			pdata += skb_frag_size(frag);
		}

		txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
		txbd->tx_bd_haddr = txr->data_mapping;
		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
		memcpy(txbd, tx_push1, sizeof(*txbd));
		prod = NEXT_TX(prod);
		tx_push->doorbell =
			cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
		txr->tx_prod = prod;

		tx_buf->is_push = 1;
		netdev_tx_sent_queue(txq, skb->len);
		wmb();	/* Sync is_push and byte queue before pushing data */

		push_len = (length + sizeof(*tx_push) + 7) / 8;
		if (push_len > 16) {
			__iowrite64_copy(db, tx_push_buf, 16);
			__iowrite32_copy(db + 4, tx_push_buf + 1,
					 (push_len - 16) << 1);
		} else {
			__iowrite64_copy(db, tx_push_buf, push_len);
		}

		goto tx_done;
	}

normal_tx:
	if (length < BNXT_MIN_PKT_SIZE) {
		pad = BNXT_MIN_PKT_SIZE - length;
		if (skb_pad(skb, pad)) {
			/* SKB already freed. */
			tx_buf->skb = NULL;
			return NETDEV_TX_OK;
		}
		length = BNXT_MIN_PKT_SIZE;
	}

	mapping = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&pdev->dev, mapping))) {
		dev_kfree_skb_any(skb);
		tx_buf->skb = NULL;
		return NETDEV_TX_OK;
	}

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		((last_frag + 2) << TX_BD_FLAGS_BD_CNT_SHIFT);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

	txbd1->tx_bd_hsize_lflags = 0;
	if (skb_is_gso(skb)) {
		u32 hdr_len;

		if (skb->encapsulation)
			hdr_len = skb_inner_network_offset(skb) +
				skb_inner_network_header_len(skb) +
				inner_tcp_hdrlen(skb);
		else
			hdr_len = skb_transport_offset(skb) +
				tcp_hdrlen(skb);

		txbd1->tx_bd_hsize_lflags = cpu_to_le32(TX_BD_FLAGS_LSO |
					TX_BD_FLAGS_T_IPID |
					(hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txbd1->tx_bd_hsize_lflags =
			cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
		txbd1->tx_bd_mss = 0;
	}

	length >>= 9;
	flags |= bnxt_lhint_arr[length];
	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);

	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
			cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(&pdev->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(&pdev->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_addr_set(tx_buf, mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len + pad) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	/* Sync BD data before updating doorbell */
	wmb();

	prod = NEXT_TX(prod);
	txr->tx_prod = prod;

	if (!skb->xmit_more || netif_xmit_stopped(txq))
		bnxt_db_write(bp, &txr->tx_db, prod);

tx_done:

	mmiowb();

	if (unlikely(bnxt_tx_avail(bp, txr) <= MAX_SKB_FRAGS + 1)) {
		if (skb->xmit_more && !tx_buf->is_push)
			bnxt_db_write(bp, &txr->tx_db, prod);

		netif_tx_stop_queue(txq);

		/* netif_tx_stop_queue() must be done before checking
		 * tx index in bnxt_tx_avail() below, because in
		 * bnxt_tx_int(), we update tx index before checking for
		 * netif_tx_queue_stopped().
		 */
		smp_mb();
		if (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[prod];
	tx_buf->skb = NULL;
	dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[prod];
		dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
			       skb_frag_size(&skb_shinfo(skb)->frags[i]),
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

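/* TX completion processing: unmap and free up to @nr_pkts completed
 * packets, then wake the queue if it was stopped and enough descriptors
 * have been freed.
 */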
static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts)
{
	struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
	struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index);
	u16 cons = txr->tx_cons;
	struct pci_dev *pdev = bp->pdev;
	int i;
	unsigned int tx_bytes = 0;

	for (i = 0; i < nr_pkts; i++) {
		struct bnxt_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[cons];
		cons = NEXT_TX(cons);
		skb = tx_buf->skb;
		tx_buf->skb = NULL;

		if (tx_buf->is_push) {
			tx_buf->is_push = 0;
			goto next_tx_int;
		}

		dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[cons];
			dma_unmap_page(
				&pdev->dev,
				dma_unmap_addr(tx_buf, mapping),
				skb_frag_size(&skb_shinfo(skb)->frags[j]),
				PCI_DMA_TODEVICE);
		}

next_tx_int:
		cons = NEXT_TX(cons);

		tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	netdev_tx_completed_queue(txq, nr_pkts, tx_bytes);
	txr->tx_cons = cons;

	/* Need to make the tx_cons update visible to bnxt_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnxt_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    bnxt_tx_avail(bp, txr) > bp->tx_wake_thresh &&
		    txr->dev_state != BNXT_DEV_STATE_CLOSING)
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}

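/* Allocate and DMA-map a full page for page-mode (XDP) RX buffers; the
 * returned mapping is advanced by bp->rx_dma_offset.
 */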
static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping,
					 gfp_t gfp)
{
	struct device *dev = &bp->pdev->dev;
	struct page *page;

	page = alloc_page(gfp);
	if (!page)
		return NULL;

	*mapping = dma_map_page_attrs(dev, page, 0, PAGE_SIZE, bp->rx_dir,
				      DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(dev, *mapping)) {
		__free_page(page);
		return NULL;
	}
	*mapping += bp->rx_dma_offset;
	return page;
}

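/* Allocate and DMA-map a kmalloc'ed buffer for the normal RX path. */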
static inline u8 *__bnxt_alloc_rx_data(struct bnxt *bp, dma_addr_t *mapping,
				       gfp_t gfp)
{
	u8 *data;
	struct pci_dev *pdev = bp->pdev;

	data = kmalloc(bp->rx_buf_size, gfp);
	if (!data)
		return NULL;

	*mapping = dma_map_single_attrs(&pdev->dev, data + bp->rx_dma_offset,
					bp->rx_buf_use_size, bp->rx_dir,
					DMA_ATTR_WEAK_ORDERING);

	if (dma_mapping_error(&pdev->dev, *mapping)) {
		kfree(data);
		data = NULL;
	}
	return data;
}

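/* Fill the RX ring slot at @prod with a fresh buffer (a page or a data
 * buffer depending on RX mode) and point its BD at the DMA mapping.
 */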
int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
		       u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod];
	dma_addr_t mapping;

	if (BNXT_RX_PAGE_MODE(bp)) {
		struct page *page = __bnxt_alloc_rx_page(bp, &mapping, gfp);

		if (!page)
			return -ENOMEM;

		rx_buf->data = page;
		rx_buf->data_ptr = page_address(page) + bp->rx_offset;
	} else {
		u8 *data = __bnxt_alloc_rx_data(bp, &mapping, gfp);

		if (!data)
			return -ENOMEM;

		rx_buf->data = data;
		rx_buf->data_ptr = data + bp->rx_offset;
	}
	rx_buf->mapping = mapping;

	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	return 0;
}

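/* Recycle the buffer at @cons into the slot at the current RX producer
 * index, e.g. after an allocation failure or after the packet has been
 * copied out.
 */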
void bnxt_reuse_rx_data(struct bnxt_rx_ring_info *rxr, u16 cons, void *data)
{
	u16 prod = rxr->rx_prod;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	prod_rx_buf = &rxr->rx_buf_ring[prod];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static inline u16 bnxt_find_next_agg_idx(struct bnxt_rx_ring_info *rxr, u16 idx)
{
	u16 next, max = rxr->rx_agg_bmap_size;

	next = find_next_zero_bit(rxr->rx_agg_bmap, max, idx);
	if (next >= max)
		next = find_first_zero_bit(rxr->rx_agg_bmap, max);
	return next;
}

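/* Post one aggregation-ring buffer at @prod.  When PAGE_SIZE exceeds
 * BNXT_RX_PAGE_SIZE, a single page is carved into multiple ring buffers
 * and reference counted per chunk.
 */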
static inline int bnxt_alloc_rx_page(struct bnxt *bp,
				     struct bnxt_rx_ring_info *rxr,
				     u16 prod, gfp_t gfp)
{
	struct rx_bd *rxbd =
		&rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	struct bnxt_sw_rx_agg_bd *rx_agg_buf;
	struct pci_dev *pdev = bp->pdev;
	struct page *page;
	dma_addr_t mapping;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	unsigned int offset = 0;

	if (PAGE_SIZE > BNXT_RX_PAGE_SIZE) {
		page = rxr->rx_page;
		if (!page) {
			page = alloc_page(gfp);
			if (!page)
				return -ENOMEM;
			rxr->rx_page = page;
			rxr->rx_page_offset = 0;
		}
		offset = rxr->rx_page_offset;
		rxr->rx_page_offset += BNXT_RX_PAGE_SIZE;
		if (rxr->rx_page_offset == PAGE_SIZE)
			rxr->rx_page = NULL;
		else
			get_page(page);
	} else {
		page = alloc_page(gfp);
		if (!page)
			return -ENOMEM;
	}

	mapping = dma_map_page_attrs(&pdev->dev, page, offset,
				     BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(&pdev->dev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
		sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

	__set_bit(sw_prod, rxr->rx_agg_bmap);
	rx_agg_buf = &rxr->rx_agg_ring[sw_prod];
	rxr->rx_sw_agg_prod = NEXT_RX_AGG(sw_prod);

	rx_agg_buf->page = page;
	rx_agg_buf->offset = offset;
	rx_agg_buf->mapping = mapping;
	rxbd->rx_bd_haddr = cpu_to_le64(mapping);
	rxbd->rx_bd_opaque = sw_prod;
	return 0;
}

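/* Return @agg_bufs aggregation buffers, described by the completion
 * entries starting at @cp_cons, to the aggregation ring unused.
 */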
static void bnxt_reuse_rx_agg_bufs(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
				   u32 agg_bufs)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u16 sw_prod = rxr->rx_sw_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_bd *prod_bd;
		struct page *page;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnxt_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->page to NULL first.
		 */
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;
		prod_rx_buf->page = page;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = NEXT_RX_AGG(sw_prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

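/* Build an skb from a page-mode RX buffer: pull the packet headers
 * (using the hardware payload hint, or eth_get_headlen() when the hint
 * is zero) into the skb head and attach the page as a fragment.
 */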
static struct sk_buff *bnxt_rx_page_skb(struct bnxt *bp,
					struct bnxt_rx_ring_info *rxr,
					u16 cons, void *data, u8 *data_ptr,
					dma_addr_t dma_addr,
					unsigned int offset_and_len)
{
	unsigned int payload = offset_and_len >> 16;
	unsigned int len = offset_and_len & 0xffff;
	struct skb_frag_struct *frag;
	struct page *page = data;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int off, err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}
	dma_addr -= bp->rx_dma_offset;
	dma_unmap_page_attrs(&bp->pdev->dev, dma_addr, PAGE_SIZE, bp->rx_dir,
			     DMA_ATTR_WEAK_ORDERING);

	if (unlikely(!payload))
		payload = eth_get_headlen(data_ptr, len);

	skb = napi_alloc_skb(&rxr->bnapi->napi, payload);
	if (!skb) {
		__free_page(page);
		return NULL;
	}

	off = (void *)data_ptr - page_address(page);
	skb_add_rx_frag(skb, 0, page, off, len, PAGE_SIZE);
	memcpy(skb->data - NET_IP_ALIGN, data_ptr - NET_IP_ALIGN,
	       payload + NET_IP_ALIGN);

	frag = &skb_shinfo(skb)->frags[0];
	skb_frag_size_sub(frag, payload);
	frag->page_offset += payload;
	skb->data_len -= payload;
	skb->tail += payload;

	return skb;
}

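/* Normal RX path: replenish the ring slot, then wrap the received
 * buffer in an skb with build_skb().
 */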
static struct sk_buff *bnxt_rx_skb(struct bnxt *bp,
				   struct bnxt_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int offset_and_len)
{
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnxt_alloc_rx_data(bp, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnxt_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	skb = build_skb(data, 0);
	dma_unmap_single_attrs(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
			       bp->rx_dir, DMA_ATTR_WEAK_ORDERING);
	if (!skb) {
		kfree(data);
		return NULL;
	}

	skb_reserve(skb, bp->rx_offset);
	skb_put(skb, offset_and_len & 0xffff);
	return skb;
}

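/* Append @agg_bufs aggregation buffers to @skb as page fragments.  If a
 * replacement page cannot be allocated, the skb is dropped and the
 * remaining aggregation buffers are recycled.
 */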
static struct sk_buff *bnxt_rx_pages(struct bnxt *bp,
				     struct bnxt_cp_ring_info *cpr,
				     struct sk_buff *skb, u16 cp_cons,
				     u32 agg_bufs)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct pci_dev *pdev = bp->pdev;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u16 prod = rxr->rx_agg_prod;
	u32 i;

	for (i = 0; i < agg_bufs; i++) {
		u16 cons, frag_len;
		struct rx_agg_cmp *agg;
		struct bnxt_sw_rx_agg_bd *cons_rx_buf;
		struct page *page;
		dma_addr_t mapping;

		agg = (struct rx_agg_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_ring[cons];
		skb_fill_page_desc(skb, i, cons_rx_buf->page,
				   cons_rx_buf->offset, frag_len);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnxt_alloc_rx_page() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		mapping = cons_rx_buf->mapping;
		page = cons_rx_buf->page;
		cons_rx_buf->page = NULL;

		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_ATOMIC) != 0) {
			struct skb_shared_info *shinfo;
			unsigned int nr_frags;

			shinfo = skb_shinfo(skb);
			nr_frags = --shinfo->nr_frags;
			__skb_frag_set_page(&shinfo->frags[nr_frags], NULL);

			dev_kfree_skb(skb);

			cons_rx_buf->page = page;

			/* Update prod since possibly some pages have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs - i);
			return NULL;
		}

		dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE,
				     PCI_DMA_FROMDEVICE,
				     DMA_ATTR_WEAK_ORDERING);

		skb->data_len += frag_len;
		skb->len += frag_len;
		skb->truesize += PAGE_SIZE;

		prod = NEXT_RX_AGG(prod);
		cp_cons = NEXT_CMP(cp_cons);
	}
	rxr->rx_agg_prod = prod;
	return skb;
}

static int bnxt_agg_bufs_valid(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	u16 last;
	struct rx_agg_cmp *agg;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(*raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->cp_desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(agg, *raw_cons);
}

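/* Copy a small packet (at most bp->rx_copy_thresh bytes) into a new
 * skb so that the original ring buffer can be left in place.
 */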
static inline struct sk_buff *bnxt_copy_skb(struct bnxt_napi *bnapi, u8 *data,
					    unsigned int len,
					    dma_addr_t mapping)
{
	struct bnxt *bp = bnapi->bp;
	struct pci_dev *pdev = bp->pdev;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(&pdev->dev, mapping, bp->rx_copy_thresh,
				bp->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(&pdev->dev, mapping, bp->rx_copy_thresh,
				   bp->rx_dir);

	skb_put(skb, len);
	return skb;
}

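/* Skip over an RX or TPA-end completion and its aggregation entries
 * without processing them.  Returns -EBUSY if the entries have not all
 * been written by hardware yet.
 */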
static int bnxt_discard_rx(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	struct rx_cmp *rxcmp = cmp;
	u32 tmp_raw_cons = *raw_cons;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp *tpa_end = cmp;

		agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
			    RX_TPA_END_CMP_AGG_BUFS) >>
			   RX_TPA_END_CMP_AGG_BUFS_SHIFT;
	}

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

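/* Queue slow-path (sp_task) work: PFs use the dedicated bnxt_pf_wq
 * workqueue, VFs the system workqueue.
 */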
static void bnxt_queue_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		queue_work(bnxt_pf_wq, &bp->sp_task);
	else
		schedule_work(&bp->sp_task);
}

static void bnxt_cancel_sp_work(struct bnxt *bp)
{
	if (BNXT_PF(bp))
		flush_workqueue(bnxt_pf_wq);
	else
		cancel_work_sync(&bp->sp_task);
}

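/* Request a ring reset from the slow-path task and poison rx_next_cons
 * with an out-of-range value so further completions on this ring are
 * not trusted until the reset completes.
 */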
static void bnxt_sched_reset(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;
		set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
	}
	rxr->rx_next_cons = 0xffff;
}

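/* Handle a TPA_START completion: swap the aggregation slot's spare
 * buffer into the RX ring and save the started buffer, along with its
 * RSS hash, VLAN metadata and header info, in tpa_info for use when the
 * TPA_END completion arrives.
 */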
static void bnxt_tpa_start(struct bnxt *bp, struct bnxt_rx_ring_info *rxr,
			   struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	u8 agg_id = TPA_START_AGG_ID(tpa_start);
	u16 cons, prod;
	struct bnxt_tpa_info *tpa_info;
	struct bnxt_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons)) {
		bnxt_sched_reset(bp, rxr);
		return;
	}
	/* Store cfa_code in tpa_info to use in tpa_end
	 * completion processing.
	 */
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
				RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		u32 hash_type = TPA_START_HASH_TYPE(tpa_start);

		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		tpa_info->gso_type = SKB_GSO_TCPV4;
		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type == 3 || TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		if (netif_msg_rx_err(bp))
			netdev_warn(bp->dev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->metadata = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);

	rxr->rx_prod = NEXT_RX(prod);
	cons = NEXT_RX(cons);
	rxr->rx_next_cons = NEXT_RX(cons);
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnxt_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnxt_abort_tpa(struct bnxt_cp_ring_info *cpr, u16 cp_cons,
			   u32 agg_bufs)
{
	if (agg_bufs)
		bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);
}

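/* GRO fixup for 5731x chips: rebuild the network/transport header
 * offsets and the TCP pseudo checksum from the hdr_info offsets saved
 * at TPA_START.  Internal loopback packets shift all offsets by 4
 * bytes, which is detected via inner_mac_off.
 */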
static struct sk_buff *bnxt_gro_func_5731x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off;
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	bool loopback = false;

	inner_ip_off = BNXT_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNXT_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNXT_TPA_OUTER_L3_OFF(hdr_info);

	/* If the packet is an internal loopback packet, the offsets will
	 * have an extra 4 bytes.
	 */
	if (inner_mac_off == 4) {
		loopback = true;
	} else if (inner_mac_off > 4) {
		__be16 proto = *((__be16 *)(skb->data + inner_ip_off -
					    ETH_HLEN - 2));

		/* We only support inner IPv4/IPv6.  If we don't see the
		 * correct protocol ID, it must be a loopback packet where
		 * the offsets are off by 4.
		 */
		if (proto != htons(ETH_P_IP) && proto != htons(ETH_P_IPV6))
			loopback = true;
	}
	if (loopback) {
		/* internal loopback packet, reduce all offsets by 4 */
		inner_ip_off -= 4;
		inner_mac_off -= 4;
		outer_ip_off -= 4;
	}

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	if (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		struct iphdr *iph = ip_hdr(skb);

		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	}

	if (inner_mac_off) { /* tunnel */
		struct udphdr *uh = NULL;
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		if (proto == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

#define BNXT_IPV4_HDR_SIZE	(sizeof(struct iphdr) + sizeof(struct tcphdr))
#define BNXT_IPV6_HDR_SIZE	(sizeof(struct ipv6hdr) + sizeof(struct tcphdr))

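/* GRO fixup for 5730x chips, which report only the payload offset: the
 * header offsets are reconstructed from the fixed IPv4/IPv6-plus-TCP
 * header sizes above, plus 12 bytes when TCP timestamps are present.
 */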
static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
					   int payload_off, int tcp_ts,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	struct tcphdr *th;
	int len, nw_off, tcp_opt_len = 0;

	if (tcp_ts)
		tcp_opt_len = 12;

	if (tpa_info->gso_type == SKB_GSO_TCPV4) {
		struct iphdr *iph;

		nw_off = payload_off - BNXT_IPV4_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ip_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct iphdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v4_check(len, iph->saddr, iph->daddr, 0);
	} else if (tpa_info->gso_type == SKB_GSO_TCPV6) {
		struct ipv6hdr *iph;

		nw_off = payload_off - BNXT_IPV6_HDR_SIZE - tcp_opt_len -
			 ETH_HLEN;
		skb_set_network_header(skb, nw_off);
		iph = ipv6_hdr(skb);
		skb_set_transport_header(skb, nw_off + sizeof(struct ipv6hdr));
		len = skb->len - skb_transport_offset(skb);
		th = tcp_hdr(skb);
		th->check = ~tcp_v6_check(len, &iph->saddr, &iph->daddr, 0);
	} else {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	if (nw_off) { /* tunnel */
		struct udphdr *uh = NULL;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = (struct iphdr *)skb->data;

			if (iph->protocol == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		} else {
			struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

			if (iph->nexthdr == IPPROTO_UDP)
				uh = (struct udphdr *)(iph + 1);
		}
		if (uh) {
			if (uh->check)
				skb_shinfo(skb)->gso_type |=
					SKB_GSO_UDP_TUNNEL_CSUM;
			else
				skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
		}
	}
#endif
	return skb;
}

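/* Finish a GRO-coalesced packet: fill in gso_size, gso_type and the
 * segment count from the TPA_END completion, run the chip-specific
 * header fixup (bp->gro_func), then call tcp_gro_complete().
 */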
static inline struct sk_buff *bnxt_gro_skb(struct bnxt *bp,
					   struct bnxt_tpa_info *tpa_info,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   struct sk_buff *skb)
{
#ifdef CONFIG_INET
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		       RX_TPA_END_CMP_PAYLOAD_OFFSET) >>
		      RX_TPA_END_CMP_PAYLOAD_OFFSET_SHIFT;
	skb = bp->gro_func(tpa_info, payload_off, TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);
#endif
	return skb;
}

/* Given the cfa_code of a received packet, determine which netdev
 * (vf-rep or PF) the packet is destined to.
 */
static struct net_device *bnxt_get_pkt_dev(struct bnxt *bp, u16 cfa_code)
{
	struct net_device *dev = bnxt_get_vf_rep(bp, cfa_code);

	/* if vf-rep dev is NULL, the packet must belong to the PF */
	return dev ? dev : bp->dev;
}

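/* Handle a TPA_END completion: build the full coalesced packet from the
 * saved TPA buffer plus any aggregation buffers, apply VLAN, checksum
 * and RSS metadata, and perform GRO completion if the packet was
 * coalesced in GRO mode.
 */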
static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp,
					   struct bnxt_cp_ring_info *cpr,
					   u32 *raw_cons,
					   struct rx_tpa_end_cmp *tpa_end,
					   struct rx_tpa_end_cmp_ext *tpa_end1,
					   u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	u8 agg_id = TPA_END_AGG_ID(tpa_end);
	u8 *data_ptr, agg_bufs;
	u16 cp_cons = RING_CMP(*raw_cons);
	unsigned int len;
	struct bnxt_tpa_info *tpa_info;
	dma_addr_t mapping;
	struct sk_buff *skb;
	void *data;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnxt_discard_rx(bp, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	tpa_info = &rxr->rx_tpa[agg_id];
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	agg_bufs = (le32_to_cpu(tpa_end->rx_tpa_end_cmp_misc_v1) &
		    RX_TPA_END_CMP_AGG_BUFS) >> RX_TPA_END_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, raw_cons))
			return ERR_PTR(-EBUSY);

		*event |= BNXT_AGG_EVENT;
		cp_cons = NEXT_CMP(cp_cons);
	}

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bp->dev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}
	} else {
		u8 *new_data;
		dma_addr_t new_mapping;

		new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC);
		if (!new_data) {
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bp->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = build_skb(data, 0);
		dma_unmap_single_attrs(&bp->pdev->dev, mapping,
				       bp->rx_buf_use_size, bp->rx_dir,
				       DMA_ATTR_WEAK_ORDERING);

		if (!skb) {
			kfree(data);
			bnxt_abort_tpa(cpr, cp_cons, agg_bufs);
			return NULL;
		}
		skb_reserve(skb, bp->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
		if (!skb) {
			/* Page reuse already handled by bnxt_rx_pages(). */
			return NULL;
		}
	}

	skb->protocol =
		eth_type_trans(skb, bnxt_get_pkt_dev(bp, tpa_info->cfa_code));

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u16 vlan_proto = tpa_info->metadata >>
			RX_CMP_FLAGS2_METADATA_TPID_SFT;
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

	if (TPA_END_GRO(tpa_end))
		skb = bnxt_gro_skb(bp, tpa_info, tpa_end, tpa_end1, skb);

	return skb;
}

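/* Deliver a completed rx SKB: packets owned by a vf-rep netdev go to the
 * vf-rep rx handler, everything else goes up the PF's NAPI GRO path.
 */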
static void bnxt_deliver_skb(struct bnxt *bp, struct bnxt_napi *bnapi,
			     struct sk_buff *skb)
{
	if (skb->dev != bp->dev) {
		/* this packet belongs to a vf-rep */
		bnxt_vf_rep_rx(bp, skb);
		return;
	}
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}

/* returns the following:
 * 1 - 1 packet successfully received
 * 0 - successful TPA_START, packet not completed yet
 * -EBUSY - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO - packet aborted due to hw error indicated in BD
 */
static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct net_device *dev = bp->dev;
	struct rx_cmp *rxcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 tmp_raw_cons = *raw_cons;
	u16 cfa_code, cons, prod, cp_cons = RING_CMP(tmp_raw_cons);
	struct bnxt_sw_rx_bd *rx_buf;
	unsigned int len;
	u8 *data_ptr, agg_bufs, cmp_type;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *data;
	int rc = 0;
	u32 misc;

	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);

	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP) {
		bnxt_tpa_start(bp, rxr, (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnxt_tpa_end(bp, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);

		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnxt_deliver_skb(bp, bnapi, skb);
			rc = 1;
		}
		*event |= BNXT_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnxt_discard_rx(bp, cpr, raw_cons, rxcmp);

		bnxt_sched_reset(bp, rxr);
		return rc1;
	}
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnxt_agg_bufs_valid(bp, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(cp_cons);
		*event |= BNXT_AGG_EVENT;
	}
	*event |= BNXT_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnxt_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnxt_reuse_rx_agg_bufs(cpr, cp_cons, agg_bufs);

		rc = -EIO;
		goto next_rx;
	}

	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (bnxt_rx_xdp(bp, rxr, cons, data, &data_ptr, &len, event)) {
		rc = 1;
		goto next_rx;
	}

	if (len <= bp->rx_copy_thresh) {
		skb = bnxt_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnxt_reuse_rx_data(rxr, cons, data);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	} else {
		u32 payload;

		if (rx_buf->data_ptr == data_ptr)
			payload = misc & RX_CMP_PAYLOAD_OFFSET;
		else
			payload = 0;
		skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr,
				      payload | len);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (agg_bufs) {
		skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs);
		if (!skb) {
			rc = -ENOMEM;
			goto next_rx;
		}
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		u32 hash_type = RX_CMP_HASH_TYPE(rxcmp);
		enum pkt_hash_types type = PKT_HASH_TYPE_L4;

		/* RSS profiles 1 and 3 with extract code 0 for inner 4-tuple */
		if (hash_type != 1 && hash_type != 3)
			type = PKT_HASH_TYPE_L3;
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	cfa_code = RX_CMP_CFA_CODE(rxcmp1);
	skb->protocol = eth_type_trans(skb, bnxt_get_pkt_dev(bp, cfa_code));

	if ((rxcmp1->rx_cmp_flags2 &
	     cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)) &&
	    (skb->dev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
		u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT;

		__vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag);
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	} else {
		if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L4_CS_ERR_BITS) {
			if (dev->features & NETIF_F_RXCSUM)
				bnapi->cp_ring.rx_l4_csum_errors++;
		}
	}

	bnxt_deliver_skb(bp, bnapi, skb);
	rc = 1;

next_rx:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = NEXT_RX(cons);

	cpr->rx_packets += 1;
	cpr->rx_bytes += len;

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;

	return rc;
}

/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnxt_force_rx_discard(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr,
				 u32 *raw_cons, u8 *event)
{
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;

	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
			&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
		return -EBUSY;

	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	return bnxt_rx_pkt(bp, cpr, raw_cons, event);
}

#define BNXT_GET_EVENT_PORT(data)	\
	((data) &			\
	 ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK)

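/* Translate a firmware async event completion into the matching sp_event
 * bit(s) and kick the slow path workqueue to act on it.
 */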
static int bnxt_async_event_process(struct bnxt *bp,
				    struct hwrm_async_event_cmpl *cmpl)
{
	u16 event_id = le16_to_cpu(cmpl->event_id);

	/* TODO CHIMP_FW: Define event id's for link change, error etc */
	switch (event_id) {
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_CHANGE: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		struct bnxt_link_info *link_info = &bp->link_info;

		if (BNXT_VF(bp))
			goto async_event_process_exit;

		/* print unsupported speed warning in forced speed mode only */
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED) &&
		    (data1 & 0x20000)) {
			u16 fw_speed = link_info->force_link_speed;
			u32 speed = bnxt_fw_to_ethtool_speed(fw_speed);

			if (speed != SPEED_UNKNOWN)
				netdev_warn(bp->dev, "Link speed %d no longer supported\n",
					    speed);
		}
		set_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT, &bp->sp_event);
	}
	/* fall through */
	case ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE:
		set_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD:
		set_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event);
		break;
	case ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED: {
		u32 data1 = le32_to_cpu(cmpl->event_data1);
		u16 port_id = BNXT_GET_EVENT_PORT(data1);

		if (BNXT_VF(bp))
			break;

		if (bp->pf.port_id != port_id)
			break;

		set_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event);
		break;
	}
	case ASYNC_EVENT_CMPL_EVENT_ID_VF_CFG_CHANGE:
		if (BNXT_PF(bp))
			goto async_event_process_exit;
		set_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event);
		break;
	default:
		goto async_event_process_exit;
	}
	bnxt_queue_sp_work(bp);
async_event_process_exit:
	bnxt_ulp_async_events(bp, cmpl);
	return 0;
}

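/* Dispatch a HWRM-related completion: match DONE completions against the
 * pending interrupt sequence id, queue forwarded VF requests for the PF
 * and hand async events to bnxt_async_event_process().
 */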
static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp)
{
	u16 cmpl_type = TX_CMP_TYPE(txcmp), vf_id, seq_id;
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	struct hwrm_fwd_req_cmpl *fwd_req_cmpl =
		(struct hwrm_fwd_req_cmpl *)txcmp;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		if (seq_id == bp->hwrm_intr_seq_id)
			bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID;
		else
			netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id);
		break;

	case CMPL_BASE_TYPE_HWRM_FWD_REQ:
		vf_id = le16_to_cpu(fwd_req_cmpl->source_id);

		if ((vf_id < bp->pf.first_vf_id) ||
		    (vf_id >= bp->pf.first_vf_id + bp->pf.active_vfs)) {
			netdev_err(bp->dev, "Msg contains invalid VF id %x\n",
				   vf_id);
			return -EINVAL;
		}

		set_bit(vf_id - bp->pf.first_vf_id, bp->pf.vf_event_bmap);
		set_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event);
		bnxt_queue_sp_work(bp);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
		bnxt_async_event_process(bp,
					 (struct hwrm_async_event_cmpl *)txcmp);
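		/* fall through */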
	default:
		break;
	}

	return 0;
}

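/* MSI-X handler: one vector per completion ring, just schedule NAPI. */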
static irqreturn_t bnxt_msix(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);

	cpr->event_ctr++;
	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static inline int bnxt_has_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr)
{
	u32 raw_cons = cpr->cp_raw_cons;
	u16 cons = RING_CMP(raw_cons);
	struct tx_cmp *txcmp;

	txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

	return TX_CMP_VALID(txcmp, raw_cons);
}

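/* Legacy INTx handler: checks for work first since the interrupt line may
 * be shared with other devices.
 */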
static irqreturn_t bnxt_inta(int irq, void *dev_instance)
{
	struct bnxt_napi *bnapi = dev_instance;
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 cons = RING_CMP(cpr->cp_raw_cons);
	u32 int_status;

	prefetch(&cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)]);

	if (!bnxt_has_work(bp, cpr)) {
		int_status = readl(bp->bar0 + BNXT_CAG_REG_LEGACY_INT_STATUS);
		/* return if erroneous interrupt */
		if (!(int_status & (0x10000 << cpr->cp_ring_struct.fw_ring_id)))
			return IRQ_NONE;
	}

	/* disable ring IRQ */
	BNXT_CP_DB_IRQ_DIS(cpr->cp_db.doorbell);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

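/* Service one completion ring up to the NAPI budget: count tx completions,
 * receive (or discard, when budget is 0) rx packets and process HWRM
 * completions.
 */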
static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    int budget)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 cons;
	int tx_pkts = 0;
	int rx_pkts = 0;
	u8 event = 0;
	struct tx_cmp *txcmp;

	cpr->has_more_work = 0;
	while (1) {
		int rc;

		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		cpr->had_work_done = 1;
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
			tx_pkts++;
			/* return full budget so NAPI will complete. */
			if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
				rx_pkts = budget;
				raw_cons = NEXT_RAW_CMP(raw_cons);
				if (budget)
					cpr->has_more_work = 1;
				break;
			}
		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			if (likely(budget))
				rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
			else
				rc = bnxt_force_rx_discard(bp, cpr, &raw_cons,
							   &event);
			if (likely(rc >= 0))
				rx_pkts += rc;
			/* Increment rx_pkts when rc is -ENOMEM to count towards
			 * the NAPI budget. Otherwise, we may potentially loop
			 * here forever if we consistently cannot allocate
			 * buffers.
			 */
			else if (rc == -ENOMEM && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely((TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_DONE) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_FWD_REQ) ||
				    (TX_CMP_TYPE(txcmp) ==
				     CMPL_BASE_TYPE_HWRM_ASYNC_EVENT))) {
			bnxt_hwrm_handler(bp, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts && rx_pkts == budget) {
			cpr->has_more_work = 1;
			break;
		}
	}

	if (event & BNXT_TX_EVENT) {
		struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
		u16 prod = txr->tx_prod;

		/* Sync BD data before updating doorbell */
		wmb();

		bnxt_db_write_relaxed(bp, &txr->tx_db, prod);
	}

	cpr->cp_raw_cons = raw_cons;
	bnapi->tx_pkts += tx_pkts;
	bnapi->events |= event;
	return rx_pkts;
}

static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi)
{
	if (bnapi->tx_pkts) {
		bnapi->tx_int(bp, bnapi, bnapi->tx_pkts);
		bnapi->tx_pkts = 0;
	}

	if (bnapi->events & BNXT_RX_EVENT) {
		struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;

		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
		if (bnapi->events & BNXT_AGG_EVENT)
			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
	}
	bnapi->events = 0;
}

static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			  int budget)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	int rx_pkts;

	rx_pkts = __bnxt_poll_work(bp, cpr, budget);

	/* ACK completion ring before freeing tx ring and producing new
	 * buffers in rx/agg rings to prevent overflowing the completion
	 * ring.
	 */
	bnxt_db_cq(bp, &cpr->cp_db, cpr->cp_raw_cons);

	__bnxt_poll_work_done(bp, bnapi);
	return rx_pkts;
}

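/* NAPI poll for the Nitro A0 special ring: rx packets on this ring are not
 * passed up the stack, so an error is forced on each one to recycle the
 * buffer.
 */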
static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
	struct tx_cmp *txcmp;
	struct rx_cmp_ext *rxcmp1;
	u32 cp_cons, tmp_raw_cons;
	u32 raw_cons = cpr->cp_raw_cons;
	u32 rx_pkts = 0;
	u8 event = 0;

	while (1) {
		int rc;

		cp_cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons))
			break;

		if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
			tmp_raw_cons = NEXT_RAW_CMP(raw_cons);
			cp_cons = RING_CMP(tmp_raw_cons);
			rxcmp1 = (struct rx_cmp_ext *)
			  &cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

			if (!RX_CMP_VALID(rxcmp1, tmp_raw_cons))
				break;

			/* force an error to recycle the buffer */
			rxcmp1->rx_cmp_cfa_code_errors_v2 |=
				cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);

			rc = bnxt_rx_pkt(bp, cpr, &raw_cons, &event);
			if (likely(rc == -EIO) && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely(TX_CMP_TYPE(txcmp) ==
				    CMPL_BASE_TYPE_HWRM_DONE)) {
			bnxt_hwrm_handler(bp, txcmp);
		} else {
			netdev_err(bp->dev,
				   "Invalid completion received on special ring\n");
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts == budget)
			break;
	}

	cpr->cp_raw_cons = raw_cons;
	BNXT_DB_CQ(&cpr->cp_db, cpr->cp_raw_cons);
	bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);

	if (event & BNXT_AGG_EVENT)
		bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);

	if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
		napi_complete_done(napi, rx_pkts);
		BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
	}
	return rx_pkts;
}

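/* Main NAPI poll handler.  Also feeds the rx counters to net_dim when
 * dynamic interrupt moderation is enabled.
 */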
static int bnxt_poll(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt *bp = bnapi->bp;
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int work_done = 0;

	while (1) {
		work_done += bnxt_poll_work(bp, cpr, budget - work_done);

		if (work_done >= budget) {
			if (!budget)
				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
			break;
		}

		if (!bnxt_has_work(bp, cpr)) {
			if (napi_complete_done(napi, work_done))
				BNXT_DB_CQ_ARM(&cpr->cp_db, cpr->cp_raw_cons);
			break;
		}
	}
	if (bp->flags & BNXT_FLAG_DIM) {
		struct net_dim_sample dim_sample;

		net_dim_sample(cpr->event_ctr,
			       cpr->rx_packets,
			       cpr->rx_bytes,
			       &dim_sample);
		net_dim(&cpr->dim, dim_sample);
	}
	mmiowb();
	return work_done;
}

static int __bnxt_poll_cqs(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i, work_done = 0;

	for (i = 0; i < 2; i++) {
		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];

		if (cpr2) {
			work_done += __bnxt_poll_work(bp, cpr2,
						      budget - work_done);
			cpr->has_more_work |= cpr2->has_more_work;
		}
	}
	return work_done;
}

static void __bnxt_poll_cqs_done(struct bnxt *bp, struct bnxt_napi *bnapi,
				 u64 dbr_type, bool all)
{
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	int i;

	for (i = 0; i < 2; i++) {
		struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[i];
		struct bnxt_db_info *db;

		if (cpr2 && (all || cpr2->had_work_done)) {
			db = &cpr2->cp_db;
			writeq(db->db_key64 | dbr_type |
			       RING_CMP(cpr2->cp_raw_cons), db->doorbell);
			cpr2->had_work_done = 0;
		}
	}
	__bnxt_poll_work_done(bp, bnapi);
}

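/* NAPI poll handler for P5 chips: drain the notification queue and poll
 * the completion sub-rings it points to, re-arming doorbells as needed.
 */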
static int bnxt_poll_p5(struct napi_struct *napi, int budget)
{
	struct bnxt_napi *bnapi = container_of(napi, struct bnxt_napi, napi);
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	u32 raw_cons = cpr->cp_raw_cons;
	struct bnxt *bp = bnapi->bp;
	struct nqe_cn *nqcmp;
	int work_done = 0;
	u32 cons;

	if (cpr->has_more_work) {
		cpr->has_more_work = 0;
		work_done = __bnxt_poll_cqs(bp, bnapi, budget);
		if (cpr->has_more_work) {
			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, false);
			return work_done;
		}
		__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL, true);
		if (napi_complete_done(napi, work_done))
			BNXT_DB_NQ_ARM_P5(&cpr->cp_db, cpr->cp_raw_cons);
		return work_done;
	}
	while (1) {
		cons = RING_CMP(raw_cons);
		nqcmp = &cpr->nq_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!NQ_CMP_VALID(nqcmp, raw_cons)) {
			__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ_ARMALL,
					     false);
			cpr->cp_raw_cons = raw_cons;
			if (napi_complete_done(napi, work_done))
				BNXT_DB_NQ_ARM_P5(&cpr->cp_db,
						  cpr->cp_raw_cons);
			return work_done;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		if (nqcmp->type == cpu_to_le16(NQ_CN_TYPE_CQ_NOTIFICATION)) {
			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
			struct bnxt_cp_ring_info *cpr2;

			cpr2 = cpr->cp_ring_arr[idx];
			work_done += __bnxt_poll_work(bp, cpr2,
						      budget - work_done);
			cpr->has_more_work = cpr2->has_more_work;
		} else {
			bnxt_hwrm_handler(bp, (struct tx_cmp *)nqcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
		if (cpr->has_more_work)
			break;
	}
	__bnxt_poll_cqs_done(bp, bnapi, DBR_TYPE_CQ, true);
	cpr->cp_raw_cons = raw_cons;
	return work_done;
}

static void bnxt_free_tx_skbs(struct bnxt *bp)
{
	int i, max_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	max_idx = bp->tx_nr_pages * TX_DESC_CNT;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		int j;

		for (j = 0; j < max_idx;) {
			struct bnxt_sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (!skb) {
				j++;
				continue;
			}

			tx_buf->skb = NULL;

			if (tx_buf->is_push) {
				dev_kfree_skb(skb);
				j += 2;
				continue;
			}

			dma_unmap_single(&pdev->dev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			last = tx_buf->nr_frags;
			j += 2;
			for (k = 0; k < last; k++, j++) {
				int ring_idx = j & bp->tx_ring_mask;
				skb_frag_t *frag = &skb_shinfo(skb)->frags[k];

				tx_buf = &txr->tx_buf_ring[ring_idx];
				dma_unmap_page(
					&pdev->dev,
					dma_unmap_addr(tx_buf, mapping),
					skb_frag_size(frag), PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
		netdev_tx_reset_queue(netdev_get_tx_queue(bp->dev, i));
	}
}

static void bnxt_free_rx_skbs(struct bnxt *bp)
{
	int i, max_idx, max_agg_idx;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->rx_ring)
		return;

	max_idx = bp->rx_nr_pages * RX_DESC_CNT;
	max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		int j;

		if (rxr->rx_tpa) {
			for (j = 0; j < MAX_TPA; j++) {
				struct bnxt_tpa_info *tpa_info =
							&rxr->rx_tpa[j];
				u8 *data = tpa_info->data;

				if (!data)
					continue;

				dma_unmap_single_attrs(&pdev->dev,
						       tpa_info->mapping,
						       bp->rx_buf_use_size,
						       bp->rx_dir,
						       DMA_ATTR_WEAK_ORDERING);

				tpa_info->data = NULL;

				kfree(data);
			}
		}

		for (j = 0; j < max_idx; j++) {
			struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[j];
			dma_addr_t mapping = rx_buf->mapping;
			void *data = rx_buf->data;

			if (!data)
				continue;

			rx_buf->data = NULL;

			if (BNXT_RX_PAGE_MODE(bp)) {
				mapping -= bp->rx_dma_offset;
				dma_unmap_page_attrs(&pdev->dev, mapping,
						     PAGE_SIZE, bp->rx_dir,
						     DMA_ATTR_WEAK_ORDERING);
				__free_page(data);
			} else {
				dma_unmap_single_attrs(&pdev->dev, mapping,
						       bp->rx_buf_use_size,
						       bp->rx_dir,
						       DMA_ATTR_WEAK_ORDERING);
				kfree(data);
			}
		}

		for (j = 0; j < max_agg_idx; j++) {
			struct bnxt_sw_rx_agg_bd *rx_agg_buf =
						&rxr->rx_agg_ring[j];
			struct page *page = rx_agg_buf->page;

			if (!page)
				continue;

			dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping,
					     BNXT_RX_PAGE_SIZE,
					     PCI_DMA_FROMDEVICE,
					     DMA_ATTR_WEAK_ORDERING);

			rx_agg_buf->page = NULL;
			__clear_bit(j, rxr->rx_agg_bmap);

			__free_page(page);
		}
		if (rxr->rx_page) {
			__free_page(rxr->rx_page);
			rxr->rx_page = NULL;
		}
	}
}

static void bnxt_free_skbs(struct bnxt *bp)
{
	bnxt_free_tx_skbs(bp);
	bnxt_free_rx_skbs(bp);
}

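/* Free the DMA pages, the page table (if any) and the vmem area backing a
 * ring described by its bnxt_ring_mem_info.
 */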
static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
	struct pci_dev *pdev = bp->pdev;
	int i;

	for (i = 0; i < rmem->nr_pages; i++) {
		if (!rmem->pg_arr[i])
			continue;

		dma_free_coherent(&pdev->dev, rmem->page_size,
				  rmem->pg_arr[i], rmem->dma_arr[i]);

		rmem->pg_arr[i] = NULL;
	}
	if (rmem->pg_tbl) {
		dma_free_coherent(&pdev->dev, rmem->nr_pages * 8,
				  rmem->pg_tbl, rmem->pg_tbl_map);
		rmem->pg_tbl = NULL;
	}
	if (rmem->vmem_size && *rmem->vmem) {
		vfree(*rmem->vmem);
		*rmem->vmem = NULL;
	}
}

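/* Allocate the DMA pages for a ring and, for multi-page rings, the page
 * table chaining them, setting the PTU PTE valid/last bits when the ring
 * flags call for them.
 */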
static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
{
	struct pci_dev *pdev = bp->pdev;
	u64 valid_bit = 0;
	int i;

	if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG))
		valid_bit = PTU_PTE_VALID;
	if (rmem->nr_pages > 1) {
		rmem->pg_tbl = dma_alloc_coherent(&pdev->dev,
						  rmem->nr_pages * 8,
						  &rmem->pg_tbl_map,
						  GFP_KERNEL);
		if (!rmem->pg_tbl)
			return -ENOMEM;
	}

	for (i = 0; i < rmem->nr_pages; i++) {
		u64 extra_bits = valid_bit;

		rmem->pg_arr[i] = dma_alloc_coherent(&pdev->dev,
						     rmem->page_size,
						     &rmem->dma_arr[i],
						     GFP_KERNEL);
		if (!rmem->pg_arr[i])
			return -ENOMEM;

		if (rmem->nr_pages > 1) {
			if (i == rmem->nr_pages - 2 &&
			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_NEXT_TO_LAST;
			else if (i == rmem->nr_pages - 1 &&
				 (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
				extra_bits |= PTU_PTE_LAST;
			rmem->pg_tbl[i] =
				cpu_to_le64(rmem->dma_arr[i] | extra_bits);
		}
	}

	if (rmem->vmem_size) {
		*rmem->vmem = vzalloc(rmem->vmem_size);
		if (!(*rmem->vmem))
			return -ENOMEM;
	}
	return 0;
}

static void bnxt_free_rx_rings(struct bnxt *bp)
{
	int i;

	if (!bp->rx_ring)
		return;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		if (rxr->xdp_prog)
			bpf_prog_put(rxr->xdp_prog);

		if (xdp_rxq_info_is_reg(&rxr->xdp_rxq))
			xdp_rxq_info_unreg(&rxr->xdp_rxq);

		kfree(rxr->rx_tpa);
		rxr->rx_tpa = NULL;

		kfree(rxr->rx_agg_bmap);
		rxr->rx_agg_bmap = NULL;

		ring = &rxr->rx_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);

		ring = &rxr->rx_agg_ring_struct;
		bnxt_free_ring(bp, &ring->ring_mem);
	}
}

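/* Allocate the rx rings, register XDP rxq info and, when aggregation/TPA
 * is enabled, the aggregation rings, bitmaps and TPA arrays.
 */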
static int bnxt_alloc_rx_rings(struct bnxt *bp)
{
	int i, rc, agg_rings = 0, tpa_rings = 0;

	if (!bp->rx_ring)
		return -ENOMEM;

	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		agg_rings = 1;

	if (bp->flags & BNXT_FLAG_TPA)
		tpa_rings = 1;

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring;

		ring = &rxr->rx_ring_struct;

		rc = xdp_rxq_info_reg(&rxr->xdp_rxq, bp->dev, i);
		if (rc < 0)
			return rc;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		ring->grp_idx = i;
		if (agg_rings) {
			u16 mem_size;

			ring = &rxr->rx_agg_ring_struct;
			rc = bnxt_alloc_ring(bp, &ring->ring_mem);
			if (rc)
				return rc;

			ring->grp_idx = i;
			rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1;
			mem_size = rxr->rx_agg_bmap_size / 8;
			rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL);
			if (!rxr->rx_agg_bmap)
				return -ENOMEM;

			if (tpa_rings) {
				rxr->rx_tpa = kcalloc(MAX_TPA,
						sizeof(struct bnxt_tpa_info),
						GFP_KERNEL);
				if (!rxr->rx_tpa)
					return -ENOMEM;
			}
		}
	}
	return 0;
}

static void bnxt_free_tx_rings(struct bnxt *bp)
{
	int i;
	struct pci_dev *pdev = bp->pdev;

	if (!bp->tx_ring)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;

		if (txr->tx_push) {
			dma_free_coherent(&pdev->dev, bp->tx_push_size,
					  txr->tx_push, txr->tx_push_mapping);
			txr->tx_push = NULL;
		}

		ring = &txr->tx_ring_struct;

		bnxt_free_ring(bp, &ring->ring_mem);
	}
}

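/* Allocate the tx rings and, if TX push is enabled, one pre-allocated DMA
 * buffer per ring to back up push operations.
 */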
static int bnxt_alloc_tx_rings(struct bnxt *bp)
{
	int i, j, rc;
	struct pci_dev *pdev = bp->pdev;

	bp->tx_push_size = 0;
	if (bp->tx_push_thresh) {
		int push_size;

		push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
					   bp->tx_push_thresh);

		if (push_size > 256) {
			push_size = 0;
			bp->tx_push_thresh = 0;
		}

		bp->tx_push_size = push_size;
	}

	for (i = 0, j = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;
		u8 qidx;

		ring = &txr->tx_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		ring->grp_idx = txr->bnapi->index;
		if (bp->tx_push_size) {
			dma_addr_t mapping;

			/* One pre-allocated DMA buffer to backup
			 * TX push operation
			 */
			txr->tx_push = dma_alloc_coherent(&pdev->dev,
						bp->tx_push_size,
						&txr->tx_push_mapping,
						GFP_KERNEL);

			if (!txr->tx_push)
				return -ENOMEM;

			mapping = txr->tx_push_mapping +
				sizeof(struct tx_push_bd);
			txr->data_mapping = cpu_to_le64(mapping);

			memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
		}
		qidx = bp->tc_to_qidx[j];
		ring->queue_id = bp->q_info[qidx].queue_id;
		if (i < bp->tx_nr_rings_xdp)
			continue;
		if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
			j++;
	}
	return 0;
}

static void bnxt_free_cp_rings(struct bnxt *bp)
{
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;
		int j;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;

		bnxt_free_ring(bp, &ring->ring_mem);

		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (cpr2) {
				ring = &cpr2->cp_ring_struct;
				bnxt_free_ring(bp, &ring->ring_mem);
				kfree(cpr2);
				cpr->cp_ring_arr[j] = NULL;
			}
		}
	}
}

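/* Allocate one completion sub-ring (used on P5 chips, hanging off the
 * per-NAPI notification queue).
 */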
static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp)
{
	struct bnxt_ring_mem_info *rmem;
	struct bnxt_ring_struct *ring;
	struct bnxt_cp_ring_info *cpr;
	int rc;

	cpr = kzalloc(sizeof(*cpr), GFP_KERNEL);
	if (!cpr)
		return NULL;

	ring = &cpr->cp_ring_struct;
	rmem = &ring->ring_mem;
	rmem->nr_pages = bp->cp_nr_pages;
	rmem->page_size = HW_CMPD_RING_SIZE;
	rmem->pg_arr = (void **)cpr->cp_desc_ring;
	rmem->dma_arr = cpr->cp_desc_mapping;
	rmem->flags = BNXT_RMEM_RING_PTE_FLAG;
	rc = bnxt_alloc_ring(bp, rmem);
	if (rc) {
		bnxt_free_ring(bp, rmem);
		kfree(cpr);
		cpr = NULL;
	}
	return cpr;
}

static int bnxt_alloc_cp_rings(struct bnxt *bp)
{
	bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
	int i, rc, ulp_base_vec, ulp_msix;

	ulp_msix = bnxt_get_ulp_msix_num(bp);
	ulp_base_vec = bnxt_get_ulp_msix_base(bp);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		cpr->bnapi = bnapi;
		ring = &cpr->cp_ring_struct;

		rc = bnxt_alloc_ring(bp, &ring->ring_mem);
		if (rc)
			return rc;

		if (ulp_msix && i >= ulp_base_vec)
			ring->map_idx = i + ulp_msix;
		else
			ring->map_idx = i;

		if (!(bp->flags & BNXT_FLAG_CHIP_P5))
			continue;

		if (i < bp->rx_nr_rings) {
			struct bnxt_cp_ring_info *cpr2 =
				bnxt_alloc_cp_sub_ring(bp);

			cpr->cp_ring_arr[BNXT_RX_HDL] = cpr2;
			if (!cpr2)
				return -ENOMEM;
			cpr2->bnapi = bnapi;
		}
		if ((sh && i < bp->tx_nr_rings) ||
		    (!sh && i >= bp->rx_nr_rings)) {
			struct bnxt_cp_ring_info *cpr2 =
				bnxt_alloc_cp_sub_ring(bp);

			cpr->cp_ring_arr[BNXT_TX_HDL] = cpr2;
			if (!cpr2)
				return -ENOMEM;
			cpr2->bnapi = bnapi;
		}
	}
	return 0;
}

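/* Point each ring's ring_mem descriptor at its completion, rx, agg or tx
 * descriptor arrays so that bnxt_alloc_ring() can populate them.
 */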
static void bnxt_init_ring_struct(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_ring_mem_info *rmem;
		struct bnxt_cp_ring_info *cpr;
		struct bnxt_rx_ring_info *rxr;
		struct bnxt_tx_ring_info *txr;
		struct bnxt_ring_struct *ring;

		if (!bnapi)
			continue;

		cpr = &bnapi->cp_ring;
		ring = &cpr->cp_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->cp_nr_pages;
		rmem->page_size = HW_CMPD_RING_SIZE;
		rmem->pg_arr = (void **)cpr->cp_desc_ring;
		rmem->dma_arr = cpr->cp_desc_mapping;
		rmem->vmem_size = 0;

		rxr = bnapi->rx_ring;
		if (!rxr)
			goto skip_rx;

		ring = &rxr->rx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_desc_ring;
		rmem->dma_arr = rxr->rx_desc_mapping;
		rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages;
		rmem->vmem = (void **)&rxr->rx_buf_ring;

		ring = &rxr->rx_agg_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->rx_agg_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)rxr->rx_agg_desc_ring;
		rmem->dma_arr = rxr->rx_agg_desc_mapping;
		rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages;
		rmem->vmem = (void **)&rxr->rx_agg_ring;

skip_rx:
		txr = bnapi->tx_ring;
		if (!txr)
			continue;

		ring = &txr->tx_ring_struct;
		rmem = &ring->ring_mem;
		rmem->nr_pages = bp->tx_nr_pages;
		rmem->page_size = HW_RXBD_RING_SIZE;
		rmem->pg_arr = (void **)txr->tx_desc_ring;
		rmem->dma_arr = txr->tx_desc_mapping;
		rmem->vmem_size = SW_TXBD_RING_SIZE * bp->tx_nr_pages;
		rmem->vmem = (void **)&txr->tx_buf_ring;
	}
}

static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type)
{
	int i;
	u32 prod;
	struct rx_bd **rx_buf_ring;

	rx_buf_ring = (struct rx_bd **)ring->ring_mem.pg_arr;
	for (i = 0, prod = 0; i < ring->ring_mem.nr_pages; i++) {
		int j;
		struct rx_bd *rxbd;

		rxbd = rx_buf_ring[i];
		if (!rxbd)
			continue;

		for (j = 0; j < RX_DESC_CNT; j++, rxbd++, prod++) {
			rxbd->rx_bd_len_flags_type = cpu_to_le32(type);
			rxbd->rx_bd_opaque = prod;
		}
	}
}

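/* Initialize one rx ring: write the rx BD templates, attach the XDP
 * program if any, fill the rx and aggregation rings with buffers and
 * pre-allocate the TPA buffers.
 */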
static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr)
{
	struct net_device *dev = bp->dev;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_ring_struct *ring;
	u32 prod, type;
	int i;

	type = (bp->rx_buf_use_size << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_PACKET_BD | RX_BD_FLAGS_EOP;

	if (NET_IP_ALIGN == 2)
		type |= RX_BD_FLAGS_SOP;

	rxr = &bp->rx_ring[ring_nr];
	ring = &rxr->rx_ring_struct;
	bnxt_init_rxbd_pages(ring, type);

	if (BNXT_RX_PAGE_MODE(bp) && bp->xdp_prog) {
		rxr->xdp_prog = bpf_prog_add(bp->xdp_prog, 1);
		if (IS_ERR(rxr->xdp_prog)) {
			int rc = PTR_ERR(rxr->xdp_prog);

			rxr->xdp_prog = NULL;
			return rc;
		}
	}
	prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX(prod);
	}
	rxr->rx_prod = prod;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	ring = &rxr->rx_agg_ring_struct;
	ring->fw_ring_id = INVALID_HW_RING_ID;

	if (!(bp->flags & BNXT_FLAG_AGG_RINGS))
		return 0;

	type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) |
		RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP;

	bnxt_init_rxbd_pages(ring, type);

	prod = rxr->rx_agg_prod;
	for (i = 0; i < bp->rx_agg_ring_size; i++) {
		if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL) != 0) {
			netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n",
				    ring_nr, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;

	if (bp->flags & BNXT_FLAG_TPA) {
		if (rxr->rx_tpa) {
			u8 *data;
			dma_addr_t mapping;

			for (i = 0; i < MAX_TPA; i++) {
				data = __bnxt_alloc_rx_data(bp, &mapping,
							    GFP_KERNEL);
				if (!data)
					return -ENOMEM;

				rxr->rx_tpa[i].data = data;
				rxr->rx_tpa[i].data_ptr = data + bp->rx_offset;
				rxr->rx_tpa[i].mapping = mapping;
			}
		} else {
			netdev_err(bp->dev, "No resource allocated for LRO/GRO\n");
			return -ENOMEM;
		}
	}

	return 0;
}

static void bnxt_init_cp_rings(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
		cpr->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
		cpr->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (!cpr2)
				continue;

			ring = &cpr2->cp_ring_struct;
			ring->fw_ring_id = INVALID_HW_RING_ID;
			cpr2->rx_ring_coal.coal_ticks = bp->rx_coal.coal_ticks;
			cpr2->rx_ring_coal.coal_bufs = bp->rx_coal.coal_bufs;
		}
	}
}

static int bnxt_init_rx_rings(struct bnxt *bp)
{
	int i, rc = 0;

	if (BNXT_RX_PAGE_MODE(bp)) {
		bp->rx_offset = NET_IP_ALIGN + XDP_PACKET_HEADROOM;
		bp->rx_dma_offset = XDP_PACKET_HEADROOM;
	} else {
		bp->rx_offset = BNXT_RX_OFFSET;
		bp->rx_dma_offset = BNXT_RX_DMA_OFFSET;
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		rc = bnxt_init_one_rx_ring(bp, i);
		if (rc)
			break;
	}

	return rc;
}

static int bnxt_init_tx_rings(struct bnxt *bp)
{
	u16 i;

	bp->tx_wake_thresh = max_t(int, bp->tx_ring_size / 2,
				   MAX_SKB_FRAGS + 1);

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;

		ring->fw_ring_id = INVALID_HW_RING_ID;
	}

	return 0;
}

static void bnxt_free_ring_grps(struct bnxt *bp)
{
	kfree(bp->grp_info);
	bp->grp_info = NULL;
}

static int bnxt_init_ring_grps(struct bnxt *bp, bool irq_re_init)
{
	int i;

	if (irq_re_init) {
		bp->grp_info = kcalloc(bp->cp_nr_rings,
				       sizeof(struct bnxt_ring_grp_info),
				       GFP_KERNEL);
		if (!bp->grp_info)
			return -ENOMEM;
	}
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (irq_re_init)
			bp->grp_info[i].fw_stats_ctx = INVALID_HW_RING_ID;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
		bp->grp_info[i].rx_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].agg_fw_ring_id = INVALID_HW_RING_ID;
		bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
	}
	return 0;
}

static void bnxt_free_vnics(struct bnxt *bp)
{
	kfree(bp->vnic_info);
	bp->vnic_info = NULL;
	bp->nr_vnics = 0;
}

static int bnxt_alloc_vnics(struct bnxt *bp)
{
	int num_vnics = 1;

#ifdef CONFIG_RFS_ACCEL
	if (bp->flags & BNXT_FLAG_RFS)
2992 num_vnics += bp->rx_nr_rings;
2993#endif
2994
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04002995 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
2996 num_vnics++;
2997
Michael Chanc0c050c2015-10-22 16:01:17 -04002998 bp->vnic_info = kcalloc(num_vnics, sizeof(struct bnxt_vnic_info),
2999 GFP_KERNEL);
3000 if (!bp->vnic_info)
3001 return -ENOMEM;
3002
3003 bp->nr_vnics = num_vnics;
3004 return 0;
3005}
3006
3007static void bnxt_init_vnics(struct bnxt *bp)
3008{
3009 int i;
3010
3011 for (i = 0; i < bp->nr_vnics; i++) {
3012 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
Michael Chan44c6f722018-10-14 07:02:53 -04003013 int j;
Michael Chanc0c050c2015-10-22 16:01:17 -04003014
3015 vnic->fw_vnic_id = INVALID_HW_RING_ID;
Michael Chan44c6f722018-10-14 07:02:53 -04003016 for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++)
3017 vnic->fw_rss_cos_lb_ctx[j] = INVALID_HW_RING_ID;
3018
Michael Chanc0c050c2015-10-22 16:01:17 -04003019 vnic->fw_l2_ctx_id = INVALID_HW_RING_ID;
3020
3021 if (bp->vnic_info[i].rss_hash_key) {
3022 if (i == 0)
3023 prandom_bytes(vnic->rss_hash_key,
3024 HW_HASH_KEY_SIZE);
3025 else
3026 memcpy(vnic->rss_hash_key,
3027 bp->vnic_info[0].rss_hash_key,
3028 HW_HASH_KEY_SIZE);
3029 }
3030 }
3031}
3032
3033static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
3034{
3035 int pages;
3036
3037 pages = ring_size / desc_per_pg;
3038
3039 if (!pages)
3040 return 1;
3041
3042 pages++;
3043
3044 while (pages & (pages - 1))
3045 pages++;
3046
3047 return pages;
3048}
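
/* Worked example (illustrative only): bnxt_calc_nr_ring_pages() returns the
 * smallest power-of-2 page count strictly greater than
 * ring_size / desc_per_pg, so the ring can be indexed with a simple
 * (nr_pages * desc_cnt - 1) mask.  E.g. ring_size = 2047 with 256
 * descriptors per page: 2047 / 256 = 7, incremented to 8, already a power
 * of 2, so 8 pages (2048 descriptors) are used.
 */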
3049
Michael Chanc6d30e82017-02-06 16:55:42 -05003050void bnxt_set_tpa_flags(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04003051{
3052 bp->flags &= ~BNXT_FLAG_TPA;
Michael Chan341138c2017-01-13 01:32:01 -05003053 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
3054 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04003055 if (bp->dev->features & NETIF_F_LRO)
3056 bp->flags |= BNXT_FLAG_LRO;
Michael Chan1054aee2017-12-16 03:09:42 -05003057 else if (bp->dev->features & NETIF_F_GRO_HW)
Michael Chanc0c050c2015-10-22 16:01:17 -04003058 bp->flags |= BNXT_FLAG_GRO;
3059}
3060
3061/* bp->rx_ring_size, bp->tx_ring_size, dev->mtu, BNXT_FLAG_{G|L}RO flags must
3062 * be set on entry.
3063 */
3064void bnxt_set_ring_params(struct bnxt *bp)
3065{
3066 u32 ring_size, rx_size, rx_space;
3067 u32 agg_factor = 0, agg_ring_size = 0;
3068
3069 /* 8 for CRC and VLAN */
3070 rx_size = SKB_DATA_ALIGN(bp->dev->mtu + ETH_HLEN + NET_IP_ALIGN + 8);
3071
3072 rx_space = rx_size + NET_SKB_PAD +
3073 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3074
3075 bp->rx_copy_thresh = BNXT_RX_COPY_THRESH;
3076 ring_size = bp->rx_ring_size;
3077 bp->rx_agg_ring_size = 0;
3078 bp->rx_agg_nr_pages = 0;
3079
3080 if (bp->flags & BNXT_FLAG_TPA)
Michael Chan2839f282016-04-25 02:30:50 -04003081 agg_factor = min_t(u32, 4, 65536 / BNXT_RX_PAGE_SIZE);
Michael Chanc0c050c2015-10-22 16:01:17 -04003082
3083 bp->flags &= ~BNXT_FLAG_JUMBO;
Michael Chanbdbd1eb2016-12-29 12:13:43 -05003084 if (rx_space > PAGE_SIZE && !(bp->flags & BNXT_FLAG_NO_AGG_RINGS)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003085 u32 jumbo_factor;
3086
3087 bp->flags |= BNXT_FLAG_JUMBO;
3088 jumbo_factor = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
3089 if (jumbo_factor > agg_factor)
3090 agg_factor = jumbo_factor;
3091 }
3092 agg_ring_size = ring_size * agg_factor;
3093
3094 if (agg_ring_size) {
3095 bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size,
3096 RX_DESC_CNT);
3097 if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) {
3098 u32 tmp = agg_ring_size;
3099
3100 bp->rx_agg_nr_pages = MAX_RX_AGG_PAGES;
3101 agg_ring_size = MAX_RX_AGG_PAGES * RX_DESC_CNT - 1;
3102 netdev_warn(bp->dev, "rx agg ring size %d reduced to %d.\n",
3103 tmp, agg_ring_size);
3104 }
3105 bp->rx_agg_ring_size = agg_ring_size;
3106 bp->rx_agg_ring_mask = (bp->rx_agg_nr_pages * RX_DESC_CNT) - 1;
3107 rx_size = SKB_DATA_ALIGN(BNXT_RX_COPY_THRESH + NET_IP_ALIGN);
3108 rx_space = rx_size + NET_SKB_PAD +
3109 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
3110 }
3111
3112 bp->rx_buf_use_size = rx_size;
3113 bp->rx_buf_size = rx_space;
3114
3115 bp->rx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, RX_DESC_CNT);
3116 bp->rx_ring_mask = (bp->rx_nr_pages * RX_DESC_CNT) - 1;
3117
3118 ring_size = bp->tx_ring_size;
3119 bp->tx_nr_pages = bnxt_calc_nr_ring_pages(ring_size, TX_DESC_CNT);
3120 bp->tx_ring_mask = (bp->tx_nr_pages * TX_DESC_CNT) - 1;
3121
3122 ring_size = bp->rx_ring_size * (2 + agg_factor) + bp->tx_ring_size;
3123 bp->cp_ring_size = ring_size;
3124
3125 bp->cp_nr_pages = bnxt_calc_nr_ring_pages(ring_size, CP_DESC_CNT);
3126 if (bp->cp_nr_pages > MAX_CP_PAGES) {
3127 bp->cp_nr_pages = MAX_CP_PAGES;
3128 bp->cp_ring_size = MAX_CP_PAGES * CP_DESC_CNT - 1;
3129 netdev_warn(bp->dev, "completion ring size %d reduced to %d.\n",
3130 ring_size, bp->cp_ring_size);
3131 }
3132 bp->cp_bit = bp->cp_nr_pages * CP_DESC_CNT;
3133 bp->cp_ring_mask = bp->cp_bit - 1;
3134}
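
/* Worked example (illustrative only, assuming 4K pages, TPA enabled and the
 * default 511-entry RX/TX rings): agg_factor = min(4, 65536 / 4096) = 4, so
 * the completion ring must absorb rx_ring_size * (2 + agg_factor) +
 * tx_ring_size = 511 * 6 + 511 = 3577 entries, which
 * bnxt_calc_nr_ring_pages() then rounds up to a power-of-2 page count.
 */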
3135
Jesper Dangaard Brouer96a86042018-01-03 11:25:44 +01003136/* Changing allocation mode of RX rings.
3137 * TODO: Update when extending xdp_rxq_info to support allocation modes.
3138 */
Michael Chanc61fb992017-02-06 16:55:36 -05003139int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode)
Michael Chan6bb19472017-02-06 16:55:32 -05003140{
Michael Chanc61fb992017-02-06 16:55:36 -05003141 if (page_mode) {
3142 if (bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU)
3143 return -EOPNOTSUPP;
Michael Chan7eb9bb32017-10-26 11:51:25 -04003144 bp->dev->max_mtu =
3145 min_t(u16, bp->max_mtu, BNXT_MAX_PAGE_MODE_MTU);
Michael Chanc61fb992017-02-06 16:55:36 -05003146 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
3147 bp->flags |= BNXT_FLAG_NO_AGG_RINGS | BNXT_FLAG_RX_PAGE_MODE;
Michael Chanc61fb992017-02-06 16:55:36 -05003148 bp->rx_dir = DMA_BIDIRECTIONAL;
3149 bp->rx_skb_func = bnxt_rx_page_skb;
Michael Chan1054aee2017-12-16 03:09:42 -05003150 /* Disable LRO or GRO_HW */
3151 netdev_update_features(bp->dev);
Michael Chanc61fb992017-02-06 16:55:36 -05003152 } else {
Michael Chan7eb9bb32017-10-26 11:51:25 -04003153 bp->dev->max_mtu = bp->max_mtu;
Michael Chanc61fb992017-02-06 16:55:36 -05003154 bp->flags &= ~BNXT_FLAG_RX_PAGE_MODE;
3155 bp->rx_dir = DMA_FROM_DEVICE;
3156 bp->rx_skb_func = bnxt_rx_skb;
3157 }
Michael Chan6bb19472017-02-06 16:55:32 -05003158 return 0;
3159}
3160
Michael Chanc0c050c2015-10-22 16:01:17 -04003161static void bnxt_free_vnic_attributes(struct bnxt *bp)
3162{
3163 int i;
3164 struct bnxt_vnic_info *vnic;
3165 struct pci_dev *pdev = bp->pdev;
3166
3167 if (!bp->vnic_info)
3168 return;
3169
3170 for (i = 0; i < bp->nr_vnics; i++) {
3171 vnic = &bp->vnic_info[i];
3172
3173 kfree(vnic->fw_grp_ids);
3174 vnic->fw_grp_ids = NULL;
3175
3176 kfree(vnic->uc_list);
3177 vnic->uc_list = NULL;
3178
3179 if (vnic->mc_list) {
3180 dma_free_coherent(&pdev->dev, vnic->mc_list_size,
3181 vnic->mc_list, vnic->mc_list_mapping);
3182 vnic->mc_list = NULL;
3183 }
3184
3185 if (vnic->rss_table) {
3186 dma_free_coherent(&pdev->dev, PAGE_SIZE,
3187 vnic->rss_table,
3188 vnic->rss_table_dma_addr);
3189 vnic->rss_table = NULL;
3190 }
3191
3192 vnic->rss_hash_key = NULL;
3193 vnic->flags = 0;
3194 }
3195}
3196
3197static int bnxt_alloc_vnic_attributes(struct bnxt *bp)
3198{
3199 int i, rc = 0, size;
3200 struct bnxt_vnic_info *vnic;
3201 struct pci_dev *pdev = bp->pdev;
3202 int max_rings;
3203
3204 for (i = 0; i < bp->nr_vnics; i++) {
3205 vnic = &bp->vnic_info[i];
3206
3207 if (vnic->flags & BNXT_VNIC_UCAST_FLAG) {
3208 int mem_size = (BNXT_MAX_UC_ADDRS - 1) * ETH_ALEN;
3209
3210 if (mem_size > 0) {
3211 vnic->uc_list = kmalloc(mem_size, GFP_KERNEL);
3212 if (!vnic->uc_list) {
3213 rc = -ENOMEM;
3214 goto out;
3215 }
3216 }
3217 }
3218
3219 if (vnic->flags & BNXT_VNIC_MCAST_FLAG) {
3220 vnic->mc_list_size = BNXT_MAX_MC_ADDRS * ETH_ALEN;
3221 vnic->mc_list =
3222 dma_alloc_coherent(&pdev->dev,
3223 vnic->mc_list_size,
3224 &vnic->mc_list_mapping,
3225 GFP_KERNEL);
3226 if (!vnic->mc_list) {
3227 rc = -ENOMEM;
3228 goto out;
3229 }
3230 }
3231
Michael Chan44c6f722018-10-14 07:02:53 -04003232 if (bp->flags & BNXT_FLAG_CHIP_P5)
3233 goto vnic_skip_grps;
3234
Michael Chanc0c050c2015-10-22 16:01:17 -04003235 if (vnic->flags & BNXT_VNIC_RSS_FLAG)
3236 max_rings = bp->rx_nr_rings;
3237 else
3238 max_rings = 1;
3239
3240 vnic->fw_grp_ids = kcalloc(max_rings, sizeof(u16), GFP_KERNEL);
3241 if (!vnic->fw_grp_ids) {
3242 rc = -ENOMEM;
3243 goto out;
3244 }
Michael Chan44c6f722018-10-14 07:02:53 -04003245vnic_skip_grps:
Michael Chanae10ae72016-12-29 12:13:38 -05003246 if ((bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
3247 !(vnic->flags & BNXT_VNIC_RSS_FLAG))
3248 continue;
3249
Michael Chanc0c050c2015-10-22 16:01:17 -04003250 /* Allocate rss table and hash key */
3251 vnic->rss_table = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3252 &vnic->rss_table_dma_addr,
3253 GFP_KERNEL);
3254 if (!vnic->rss_table) {
3255 rc = -ENOMEM;
3256 goto out;
3257 }
3258
3259 size = L1_CACHE_ALIGN(HW_HASH_INDEX_SIZE * sizeof(u16));
3260
3261 vnic->rss_hash_key = ((void *)vnic->rss_table) + size;
3262 vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + size;
3263 }
3264 return 0;
3265
3266out:
3267 return rc;
3268}
3269
3270static void bnxt_free_hwrm_resources(struct bnxt *bp)
3271{
3272 struct pci_dev *pdev = bp->pdev;
3273
Venkat Duvvurua2bf74f2018-10-05 00:26:02 -04003274 if (bp->hwrm_cmd_resp_addr) {
3275 dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
3276 bp->hwrm_cmd_resp_dma_addr);
3277 bp->hwrm_cmd_resp_addr = NULL;
3278 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003279}
3280
3281static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
3282{
3283 struct pci_dev *pdev = bp->pdev;
3284
3285 bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
3286 &bp->hwrm_cmd_resp_dma_addr,
3287 GFP_KERNEL);
3288 if (!bp->hwrm_cmd_resp_addr)
3289 return -ENOMEM;
Michael Chanc0c050c2015-10-22 16:01:17 -04003290
3291 return 0;
3292}
3293
Deepak Khungare605db82017-05-29 19:06:04 -04003294static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp)
3295{
3296 if (bp->hwrm_short_cmd_req_addr) {
3297 struct pci_dev *pdev = bp->pdev;
3298
Michael Chan1dfddc42018-10-14 07:02:39 -04003299 dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
Deepak Khungare605db82017-05-29 19:06:04 -04003300 bp->hwrm_short_cmd_req_addr,
3301 bp->hwrm_short_cmd_req_dma_addr);
3302 bp->hwrm_short_cmd_req_addr = NULL;
3303 }
3304}
3305
3306static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp)
3307{
3308 struct pci_dev *pdev = bp->pdev;
3309
3310 bp->hwrm_short_cmd_req_addr =
Michael Chan1dfddc42018-10-14 07:02:39 -04003311 dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len,
Deepak Khungare605db82017-05-29 19:06:04 -04003312 &bp->hwrm_short_cmd_req_dma_addr,
3313 GFP_KERNEL);
3314 if (!bp->hwrm_short_cmd_req_addr)
3315 return -ENOMEM;
3316
3317 return 0;
3318}
3319
Michael Chanc0c050c2015-10-22 16:01:17 -04003320static void bnxt_free_stats(struct bnxt *bp)
3321{
3322 u32 size, i;
3323 struct pci_dev *pdev = bp->pdev;
3324
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003325 bp->flags &= ~BNXT_FLAG_PORT_STATS;
3326 bp->flags &= ~BNXT_FLAG_PORT_STATS_EXT;
3327
Michael Chan3bdf56c2016-03-07 15:38:45 -05003328 if (bp->hw_rx_port_stats) {
3329 dma_free_coherent(&pdev->dev, bp->hw_port_stats_size,
3330 bp->hw_rx_port_stats,
3331 bp->hw_rx_port_stats_map);
3332 bp->hw_rx_port_stats = NULL;
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003333 }
3334
Michael Chan36e53342018-10-14 07:02:38 -04003335 if (bp->hw_tx_port_stats_ext) {
3336 dma_free_coherent(&pdev->dev, sizeof(struct tx_port_stats_ext),
3337 bp->hw_tx_port_stats_ext,
3338 bp->hw_tx_port_stats_ext_map);
3339 bp->hw_tx_port_stats_ext = NULL;
3340 }
3341
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003342 if (bp->hw_rx_port_stats_ext) {
3343 dma_free_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext),
3344 bp->hw_rx_port_stats_ext,
3345 bp->hw_rx_port_stats_ext_map);
3346 bp->hw_rx_port_stats_ext = NULL;
Michael Chan3bdf56c2016-03-07 15:38:45 -05003347 }
3348
Michael Chanc0c050c2015-10-22 16:01:17 -04003349 if (!bp->bnapi)
3350 return;
3351
3352 size = sizeof(struct ctx_hw_stats);
3353
3354 for (i = 0; i < bp->cp_nr_rings; i++) {
3355 struct bnxt_napi *bnapi = bp->bnapi[i];
3356 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3357
3358 if (cpr->hw_stats) {
3359 dma_free_coherent(&pdev->dev, size, cpr->hw_stats,
3360 cpr->hw_stats_map);
3361 cpr->hw_stats = NULL;
3362 }
3363 }
3364}
3365
3366static int bnxt_alloc_stats(struct bnxt *bp)
3367{
3368 u32 size, i;
3369 struct pci_dev *pdev = bp->pdev;
3370
3371 size = sizeof(struct ctx_hw_stats);
3372
3373 for (i = 0; i < bp->cp_nr_rings; i++) {
3374 struct bnxt_napi *bnapi = bp->bnapi[i];
3375 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3376
3377 cpr->hw_stats = dma_alloc_coherent(&pdev->dev, size,
3378 &cpr->hw_stats_map,
3379 GFP_KERNEL);
3380 if (!cpr->hw_stats)
3381 return -ENOMEM;
3382
3383 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
3384 }
Michael Chan3bdf56c2016-03-07 15:38:45 -05003385
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04003386 if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) {
Michael Chan3bdf56c2016-03-07 15:38:45 -05003387 bp->hw_port_stats_size = sizeof(struct rx_port_stats) +
3388 sizeof(struct tx_port_stats) + 1024;
3389
3390 bp->hw_rx_port_stats =
3391 dma_alloc_coherent(&pdev->dev, bp->hw_port_stats_size,
3392 &bp->hw_rx_port_stats_map,
3393 GFP_KERNEL);
3394 if (!bp->hw_rx_port_stats)
3395 return -ENOMEM;
3396
3397 bp->hw_tx_port_stats = (void *)(bp->hw_rx_port_stats + 1) +
3398 512;
3399 bp->hw_tx_port_stats_map = bp->hw_rx_port_stats_map +
3400 sizeof(struct rx_port_stats) + 512;
3401 bp->flags |= BNXT_FLAG_PORT_STATS;
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003402
3403 /* Display extended statistics only if FW supports it */
3404 if (bp->hwrm_spec_code < 0x10804 ||
3405 bp->hwrm_spec_code == 0x10900)
3406 return 0;
3407
3408 bp->hw_rx_port_stats_ext =
3409 dma_zalloc_coherent(&pdev->dev,
3410 sizeof(struct rx_port_stats_ext),
3411 &bp->hw_rx_port_stats_ext_map,
3412 GFP_KERNEL);
3413 if (!bp->hw_rx_port_stats_ext)
3414 return 0;
3415
Michael Chan36e53342018-10-14 07:02:38 -04003416 if (bp->hwrm_spec_code >= 0x10902) {
3417 bp->hw_tx_port_stats_ext =
3418 dma_zalloc_coherent(&pdev->dev,
3419 sizeof(struct tx_port_stats_ext),
3420 &bp->hw_tx_port_stats_ext_map,
3421 GFP_KERNEL);
3422 }
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04003423 bp->flags |= BNXT_FLAG_PORT_STATS_EXT;
Michael Chan3bdf56c2016-03-07 15:38:45 -05003424 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003425 return 0;
3426}
3427
3428static void bnxt_clear_ring_indices(struct bnxt *bp)
3429{
3430 int i;
3431
3432 if (!bp->bnapi)
3433 return;
3434
3435 for (i = 0; i < bp->cp_nr_rings; i++) {
3436 struct bnxt_napi *bnapi = bp->bnapi[i];
3437 struct bnxt_cp_ring_info *cpr;
3438 struct bnxt_rx_ring_info *rxr;
3439 struct bnxt_tx_ring_info *txr;
3440
3441 if (!bnapi)
3442 continue;
3443
3444 cpr = &bnapi->cp_ring;
3445 cpr->cp_raw_cons = 0;
3446
Michael Chanb6ab4b02016-01-02 23:44:59 -05003447 txr = bnapi->tx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05003448 if (txr) {
3449 txr->tx_prod = 0;
3450 txr->tx_cons = 0;
3451 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003452
Michael Chanb6ab4b02016-01-02 23:44:59 -05003453 rxr = bnapi->rx_ring;
Michael Chan3b2b7d92016-01-02 23:45:00 -05003454 if (rxr) {
3455 rxr->rx_prod = 0;
3456 rxr->rx_agg_prod = 0;
3457 rxr->rx_sw_agg_prod = 0;
Michael Chan376a5b82016-05-10 19:17:59 -04003458 rxr->rx_next_cons = 0;
Michael Chan3b2b7d92016-01-02 23:45:00 -05003459 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003460 }
3461}
3462
3463static void bnxt_free_ntp_fltrs(struct bnxt *bp, bool irq_reinit)
3464{
3465#ifdef CONFIG_RFS_ACCEL
3466 int i;
3467
3468 /* We are under rtnl_lock and all our NAPIs have been disabled, so
3469 * it's safe to delete the hash table.
3470 */
3471 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
3472 struct hlist_head *head;
3473 struct hlist_node *tmp;
3474 struct bnxt_ntuple_filter *fltr;
3475
3476 head = &bp->ntp_fltr_hash_tbl[i];
3477 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
3478 hlist_del(&fltr->hash);
3479 kfree(fltr);
3480 }
3481 }
3482 if (irq_reinit) {
3483 kfree(bp->ntp_fltr_bmap);
3484 bp->ntp_fltr_bmap = NULL;
3485 }
3486 bp->ntp_fltr_count = 0;
3487#endif
3488}
3489
3490static int bnxt_alloc_ntp_fltrs(struct bnxt *bp)
3491{
3492#ifdef CONFIG_RFS_ACCEL
3493 int i, rc = 0;
3494
3495 if (!(bp->flags & BNXT_FLAG_RFS))
3496 return 0;
3497
3498 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++)
3499 INIT_HLIST_HEAD(&bp->ntp_fltr_hash_tbl[i]);
3500
3501 bp->ntp_fltr_count = 0;
Dan Carpenterac45bd92017-05-06 03:49:01 +03003502 bp->ntp_fltr_bmap = kcalloc(BITS_TO_LONGS(BNXT_NTP_FLTR_MAX_FLTR),
3503 sizeof(long),
Michael Chanc0c050c2015-10-22 16:01:17 -04003504 GFP_KERNEL);
3505
3506 if (!bp->ntp_fltr_bmap)
3507 rc = -ENOMEM;
3508
3509 return rc;
3510#else
3511 return 0;
3512#endif
3513}
3514
3515static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init)
3516{
3517 bnxt_free_vnic_attributes(bp);
3518 bnxt_free_tx_rings(bp);
3519 bnxt_free_rx_rings(bp);
3520 bnxt_free_cp_rings(bp);
3521 bnxt_free_ntp_fltrs(bp, irq_re_init);
3522 if (irq_re_init) {
3523 bnxt_free_stats(bp);
3524 bnxt_free_ring_grps(bp);
3525 bnxt_free_vnics(bp);
Michael Chana960dec2017-02-06 16:55:39 -05003526 kfree(bp->tx_ring_map);
3527 bp->tx_ring_map = NULL;
Michael Chanb6ab4b02016-01-02 23:44:59 -05003528 kfree(bp->tx_ring);
3529 bp->tx_ring = NULL;
3530 kfree(bp->rx_ring);
3531 bp->rx_ring = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04003532 kfree(bp->bnapi);
3533 bp->bnapi = NULL;
3534 } else {
3535 bnxt_clear_ring_indices(bp);
3536 }
3537}
3538
3539static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init)
3540{
Michael Chan01657bc2016-01-02 23:45:03 -05003541 int i, j, rc, size, arr_size;
Michael Chanc0c050c2015-10-22 16:01:17 -04003542 void *bnapi;
3543
3544 if (irq_re_init) {
3545 /* Allocate bnapi mem pointer array and mem block for
3546 * all queues
3547 */
3548 arr_size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi *) *
3549 bp->cp_nr_rings);
3550 size = L1_CACHE_ALIGN(sizeof(struct bnxt_napi));
3551 bnapi = kzalloc(arr_size + size * bp->cp_nr_rings, GFP_KERNEL);
3552 if (!bnapi)
3553 return -ENOMEM;
3554
3555 bp->bnapi = bnapi;
3556 bnapi += arr_size;
3557 for (i = 0; i < bp->cp_nr_rings; i++, bnapi += size) {
3558 bp->bnapi[i] = bnapi;
3559 bp->bnapi[i]->index = i;
3560 bp->bnapi[i]->bp = bp;
Michael Chane38287b2018-10-14 07:02:45 -04003561 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3562 struct bnxt_cp_ring_info *cpr =
3563 &bp->bnapi[i]->cp_ring;
3564
3565 cpr->cp_ring_struct.ring_mem.flags =
3566 BNXT_RMEM_RING_PTE_FLAG;
3567 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003568 }
3569
Michael Chanb6ab4b02016-01-02 23:44:59 -05003570 bp->rx_ring = kcalloc(bp->rx_nr_rings,
3571 sizeof(struct bnxt_rx_ring_info),
3572 GFP_KERNEL);
3573 if (!bp->rx_ring)
3574 return -ENOMEM;
3575
3576 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chane38287b2018-10-14 07:02:45 -04003577 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
3578
3579 if (bp->flags & BNXT_FLAG_CHIP_P5) {
3580 rxr->rx_ring_struct.ring_mem.flags =
3581 BNXT_RMEM_RING_PTE_FLAG;
3582 rxr->rx_agg_ring_struct.ring_mem.flags =
3583 BNXT_RMEM_RING_PTE_FLAG;
3584 }
3585 rxr->bnapi = bp->bnapi[i];
Michael Chanb6ab4b02016-01-02 23:44:59 -05003586 bp->bnapi[i]->rx_ring = &bp->rx_ring[i];
3587 }
3588
3589 bp->tx_ring = kcalloc(bp->tx_nr_rings,
3590 sizeof(struct bnxt_tx_ring_info),
3591 GFP_KERNEL);
3592 if (!bp->tx_ring)
3593 return -ENOMEM;
3594
Michael Chana960dec2017-02-06 16:55:39 -05003595 bp->tx_ring_map = kcalloc(bp->tx_nr_rings, sizeof(u16),
3596 GFP_KERNEL);
3597
3598 if (!bp->tx_ring_map)
3599 return -ENOMEM;
3600
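/* With shared rings, TX ring i reuses the bnapi/completion ring of RX
 * ring i (j starts at 0); otherwise TX rings take their own bnapi
 * entries after the RX ones, e.g. with 4 RX + 4 TX unshared, TX ring 0
 * maps to bnapi[4].
 */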
Michael Chan01657bc2016-01-02 23:45:03 -05003601 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
3602 j = 0;
3603 else
3604 j = bp->rx_nr_rings;
3605
3606 for (i = 0; i < bp->tx_nr_rings; i++, j++) {
Michael Chane38287b2018-10-14 07:02:45 -04003607 struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
3608
3609 if (bp->flags & BNXT_FLAG_CHIP_P5)
3610 txr->tx_ring_struct.ring_mem.flags =
3611 BNXT_RMEM_RING_PTE_FLAG;
3612 txr->bnapi = bp->bnapi[j];
3613 bp->bnapi[j]->tx_ring = txr;
Michael Chan5f449242017-02-06 16:55:40 -05003614 bp->tx_ring_map[i] = bp->tx_nr_rings_xdp + i;
Michael Chan38413402017-02-06 16:55:43 -05003615 if (i >= bp->tx_nr_rings_xdp) {
Michael Chane38287b2018-10-14 07:02:45 -04003616 txr->txq_index = i - bp->tx_nr_rings_xdp;
Michael Chan38413402017-02-06 16:55:43 -05003617 bp->bnapi[j]->tx_int = bnxt_tx_int;
3618 } else {
Michael Chanfa3e93e2017-02-06 16:55:41 -05003619 bp->bnapi[j]->flags |= BNXT_NAPI_FLAG_XDP;
Michael Chan38413402017-02-06 16:55:43 -05003620 bp->bnapi[j]->tx_int = bnxt_tx_int_xdp;
3621 }
Michael Chanb6ab4b02016-01-02 23:44:59 -05003622 }
3623
Michael Chanc0c050c2015-10-22 16:01:17 -04003624 rc = bnxt_alloc_stats(bp);
3625 if (rc)
3626 goto alloc_mem_err;
3627
3628 rc = bnxt_alloc_ntp_fltrs(bp);
3629 if (rc)
3630 goto alloc_mem_err;
3631
3632 rc = bnxt_alloc_vnics(bp);
3633 if (rc)
3634 goto alloc_mem_err;
3635 }
3636
3637 bnxt_init_ring_struct(bp);
3638
3639 rc = bnxt_alloc_rx_rings(bp);
3640 if (rc)
3641 goto alloc_mem_err;
3642
3643 rc = bnxt_alloc_tx_rings(bp);
3644 if (rc)
3645 goto alloc_mem_err;
3646
3647 rc = bnxt_alloc_cp_rings(bp);
3648 if (rc)
3649 goto alloc_mem_err;
3650
3651 bp->vnic_info[0].flags |= BNXT_VNIC_RSS_FLAG | BNXT_VNIC_MCAST_FLAG |
3652 BNXT_VNIC_UCAST_FLAG;
3653 rc = bnxt_alloc_vnic_attributes(bp);
3654 if (rc)
3655 goto alloc_mem_err;
3656 return 0;
3657
3658alloc_mem_err:
3659 bnxt_free_mem(bp, true);
3660 return rc;
3661}
3662
Michael Chan9d8bc092016-12-29 12:13:33 -05003663static void bnxt_disable_int(struct bnxt *bp)
3664{
3665 int i;
3666
3667 if (!bp->bnapi)
3668 return;
3669
3670 for (i = 0; i < bp->cp_nr_rings; i++) {
3671 struct bnxt_napi *bnapi = bp->bnapi[i];
3672 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
Michael Chandaf1f1e2017-02-20 19:25:17 -05003673 struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
Michael Chan9d8bc092016-12-29 12:13:33 -05003674
Michael Chandaf1f1e2017-02-20 19:25:17 -05003675 if (ring->fw_ring_id != INVALID_HW_RING_ID)
Michael Chan697197e2018-10-14 07:02:46 -04003676 bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
Michael Chan9d8bc092016-12-29 12:13:33 -05003677 }
3678}
3679
Michael Chane5811b82018-03-31 13:54:18 -04003680static int bnxt_cp_num_to_irq_num(struct bnxt *bp, int n)
3681{
3682 struct bnxt_napi *bnapi = bp->bnapi[n];
3683 struct bnxt_cp_ring_info *cpr;
3684
3685 cpr = &bnapi->cp_ring;
3686 return cpr->cp_ring_struct.map_idx;
3687}
3688
Michael Chan9d8bc092016-12-29 12:13:33 -05003689static void bnxt_disable_int_sync(struct bnxt *bp)
3690{
3691 int i;
3692
3693 atomic_inc(&bp->intr_sem);
3694
3695 bnxt_disable_int(bp);
Michael Chane5811b82018-03-31 13:54:18 -04003696 for (i = 0; i < bp->cp_nr_rings; i++) {
3697 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
3698
3699 synchronize_irq(bp->irq_tbl[map_idx].vector);
3700 }
Michael Chan9d8bc092016-12-29 12:13:33 -05003701}
3702
3703static void bnxt_enable_int(struct bnxt *bp)
3704{
3705 int i;
3706
3707 atomic_set(&bp->intr_sem, 0);
3708 for (i = 0; i < bp->cp_nr_rings; i++) {
3709 struct bnxt_napi *bnapi = bp->bnapi[i];
3710 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
3711
Michael Chan697197e2018-10-14 07:02:46 -04003712 bnxt_db_nq_arm(bp, &cpr->cp_db, cpr->cp_raw_cons);
Michael Chan9d8bc092016-12-29 12:13:33 -05003713 }
3714}
3715
Michael Chanc0c050c2015-10-22 16:01:17 -04003716void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type,
3717 u16 cmpl_ring, u16 target_id)
3718{
Michael Chana8643e12016-02-26 04:00:05 -05003719 struct input *req = request;
Michael Chanc0c050c2015-10-22 16:01:17 -04003720
Michael Chana8643e12016-02-26 04:00:05 -05003721 req->req_type = cpu_to_le16(req_type);
3722 req->cmpl_ring = cpu_to_le16(cmpl_ring);
3723 req->target_id = cpu_to_le16(target_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04003724 req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr);
3725}
3726
Michael Chanfbfbc482016-02-26 04:00:07 -05003727static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len,
3728 int timeout, bool silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04003729{
Michael Chana11fa2b2016-05-15 03:04:47 -04003730 int i, intr_process, rc, tmo_count;
Michael Chana8643e12016-02-26 04:00:05 -05003731 struct input *req = msg;
Michael Chanc0c050c2015-10-22 16:01:17 -04003732 u32 *data = msg;
Michael Chan845adfe2018-03-31 13:54:15 -04003733 __le32 *resp_len;
3734 u8 *valid;
Michael Chanc0c050c2015-10-22 16:01:17 -04003735 u16 cp_ring_id, len = 0;
3736 struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr;
Deepak Khungare605db82017-05-29 19:06:04 -04003737 u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN;
Vasundhara Volamebd58182017-12-01 03:13:05 -05003738 struct hwrm_short_input short_input = {0};
Michael Chanc0c050c2015-10-22 16:01:17 -04003739
Michael Chana8643e12016-02-26 04:00:05 -05003740 req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++);
Michael Chanc0c050c2015-10-22 16:01:17 -04003741 memset(resp, 0, PAGE_SIZE);
Michael Chana8643e12016-02-26 04:00:05 -05003742 cp_ring_id = le16_to_cpu(req->cmpl_ring);
Michael Chanc0c050c2015-10-22 16:01:17 -04003743 intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1;
3744
Michael Chan1dfddc42018-10-14 07:02:39 -04003745 if (msg_len > BNXT_HWRM_MAX_REQ_LEN) {
3746 if (msg_len > bp->hwrm_max_ext_req_len ||
3747 !bp->hwrm_short_cmd_req_addr)
3748 return -EINVAL;
3749 }
3750
3751 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
3752 msg_len > BNXT_HWRM_MAX_REQ_LEN) {
Deepak Khungare605db82017-05-29 19:06:04 -04003753 void *short_cmd_req = bp->hwrm_short_cmd_req_addr;
Michael Chan1dfddc42018-10-14 07:02:39 -04003754 u16 max_msg_len;
3755
3756 /* Set the boundary for the maximum extended request length for the
3757 * short cmd format. If passed up from the device, use the max
3758 * supported internal req length.
3759 */
3760 max_msg_len = bp->hwrm_max_ext_req_len;
Deepak Khungare605db82017-05-29 19:06:04 -04003761
3762 memcpy(short_cmd_req, req, msg_len);
Michael Chan1dfddc42018-10-14 07:02:39 -04003763 if (msg_len < max_msg_len)
3764 memset(short_cmd_req + msg_len, 0,
3765 max_msg_len - msg_len);
Deepak Khungare605db82017-05-29 19:06:04 -04003766
3767 short_input.req_type = req->req_type;
3768 short_input.signature =
3769 cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD);
3770 short_input.size = cpu_to_le16(msg_len);
3771 short_input.req_addr =
3772 cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr);
3773
3774 data = (u32 *)&short_input;
3775 msg_len = sizeof(short_input);
3776
3777 /* Sync memory write before updating doorbell */
3778 wmb();
3779
3780 max_req_len = BNXT_HWRM_SHORT_REQ_LEN;
3781 }
3782
Michael Chanc0c050c2015-10-22 16:01:17 -04003783 /* Write request msg to hwrm channel */
3784 __iowrite32_copy(bp->bar0, data, msg_len / 4);
3785
Deepak Khungare605db82017-05-29 19:06:04 -04003786 for (i = msg_len; i < max_req_len; i += 4)
Michael Chand79979a2016-01-07 19:56:57 -05003787 writel(0, bp->bar0 + i);
3788
Michael Chanc0c050c2015-10-22 16:01:17 -04003789 /* currently supports only one outstanding message */
3790 if (intr_process)
Michael Chana8643e12016-02-26 04:00:05 -05003791 bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04003792
3793 /* Ring channel doorbell */
3794 writel(1, bp->bar0 + 0x100);
3795
Michael Chanff4fe812016-02-26 04:00:04 -05003796 if (!timeout)
3797 timeout = DFLT_HWRM_CMD_TIMEOUT;
Andy Gospodarek9751e8e2018-04-26 17:44:39 -04003798 /* convert timeout to usec */
3799 timeout *= 1000;
Michael Chanff4fe812016-02-26 04:00:04 -05003800
Michael Chanc0c050c2015-10-22 16:01:17 -04003801 i = 0;
Andy Gospodarek9751e8e2018-04-26 17:44:39 -04003802 /* Short timeout for the first few iterations:
3803 * number of loops = number of loops for short timeout +
3804 * number of loops for standard timeout.
3805 */
3806 tmo_count = HWRM_SHORT_TIMEOUT_COUNTER;
3807 timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER;
3808 tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT);
Michael Chan845adfe2018-03-31 13:54:15 -04003809 resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET;
Michael Chanc0c050c2015-10-22 16:01:17 -04003810 if (intr_process) {
3811 /* Wait until hwrm response cmpl interrupt is processed */
3812 while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID &&
Michael Chana11fa2b2016-05-15 03:04:47 -04003813 i++ < tmo_count) {
Andy Gospodarek9751e8e2018-04-26 17:44:39 -04003814 /* on first few passes, just barely sleep */
3815 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3816 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3817 HWRM_SHORT_MAX_TIMEOUT);
3818 else
3819 usleep_range(HWRM_MIN_TIMEOUT,
3820 HWRM_MAX_TIMEOUT);
Michael Chanc0c050c2015-10-22 16:01:17 -04003821 }
3822
3823 if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) {
3824 netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n",
Michael Chana8643e12016-02-26 04:00:05 -05003825 le16_to_cpu(req->req_type));
Michael Chanc0c050c2015-10-22 16:01:17 -04003826 return -1;
3827 }
Michael Chan845adfe2018-03-31 13:54:15 -04003828 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3829 HWRM_RESP_LEN_SFT;
3830 valid = bp->hwrm_cmd_resp_addr + len - 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04003831 } else {
Michael Chancc559c12018-05-08 03:18:38 -04003832 int j;
3833
Michael Chanc0c050c2015-10-22 16:01:17 -04003834 /* Check if response len is updated */
Michael Chana11fa2b2016-05-15 03:04:47 -04003835 for (i = 0; i < tmo_count; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003836 len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >>
3837 HWRM_RESP_LEN_SFT;
3838 if (len)
3839 break;
Andy Gospodarek9751e8e2018-04-26 17:44:39 -04003840 /* on first few passes, just barely sleep */
3841 if (i < HWRM_SHORT_TIMEOUT_COUNTER)
3842 usleep_range(HWRM_SHORT_MIN_TIMEOUT,
3843 HWRM_SHORT_MAX_TIMEOUT);
3844 else
3845 usleep_range(HWRM_MIN_TIMEOUT,
3846 HWRM_MAX_TIMEOUT);
Michael Chanc0c050c2015-10-22 16:01:17 -04003847 }
3848
Michael Chana11fa2b2016-05-15 03:04:47 -04003849 if (i >= tmo_count) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003850 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n",
Michael Chancc559c12018-05-08 03:18:38 -04003851 HWRM_TOTAL_TIMEOUT(i),
3852 le16_to_cpu(req->req_type),
Michael Chan8578d6c2016-05-15 03:04:48 -04003853 le16_to_cpu(req->seq_id), len);
Michael Chanc0c050c2015-10-22 16:01:17 -04003854 return -1;
3855 }
3856
Michael Chan845adfe2018-03-31 13:54:15 -04003857 /* Last byte of resp contains valid bit */
3858 valid = bp->hwrm_cmd_resp_addr + len - 1;
Michael Chancc559c12018-05-08 03:18:38 -04003859 for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) {
Michael Chan845adfe2018-03-31 13:54:15 -04003860 /* make sure we read from updated DMA memory */
3861 dma_rmb();
3862 if (*valid)
Michael Chanc0c050c2015-10-22 16:01:17 -04003863 break;
Michael Chana11fa2b2016-05-15 03:04:47 -04003864 udelay(1);
Michael Chanc0c050c2015-10-22 16:01:17 -04003865 }
3866
Michael Chancc559c12018-05-08 03:18:38 -04003867 if (j >= HWRM_VALID_BIT_DELAY_USEC) {
Michael Chanc0c050c2015-10-22 16:01:17 -04003868 netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n",
Michael Chancc559c12018-05-08 03:18:38 -04003869 HWRM_TOTAL_TIMEOUT(i),
3870 le16_to_cpu(req->req_type),
Michael Chana8643e12016-02-26 04:00:05 -05003871 le16_to_cpu(req->seq_id), len, *valid);
Michael Chanc0c050c2015-10-22 16:01:17 -04003872 return -1;
3873 }
3874 }
3875
Michael Chan845adfe2018-03-31 13:54:15 -04003876 /* Zero valid bit for compatibility. Valid bit in an older spec
3877 * may become a new field in a newer spec. We must make sure that
3878 * a new field not implemented by the old spec reads as zero.
3879 */
3880 *valid = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04003881 rc = le16_to_cpu(resp->error_code);
Michael Chanfbfbc482016-02-26 04:00:07 -05003882 if (rc && !silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04003883 netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n",
3884 le16_to_cpu(resp->req_type),
3885 le16_to_cpu(resp->seq_id), rc);
Michael Chanfbfbc482016-02-26 04:00:07 -05003886 return rc;
3887}
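
/* Timing sketch (illustrative only, assuming the bnxt.h values of this era:
 * HWRM_SHORT_TIMEOUT_COUNTER = 5, HWRM_SHORT_MIN_TIMEOUT = 3 us,
 * HWRM_MIN_TIMEOUT = 25 us): a 500 ms timeout converts to 500000 us, of
 * which the first 5 polls sleep ~3 us each and the remaining
 * (500000 - 15) / 25 ~= 20000 polls sleep ~25 us each, so tmo_count covers
 * the full budget with a fast initial poll phase.
 */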
3888
3889int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3890{
3891 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false);
Michael Chanc0c050c2015-10-22 16:01:17 -04003892}
3893
Michael Chancc72f3b2017-10-13 21:09:33 -04003894int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3895 int timeout)
3896{
3897 return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3898}
3899
Michael Chanc0c050c2015-10-22 16:01:17 -04003900int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout)
3901{
3902 int rc;
3903
3904 mutex_lock(&bp->hwrm_cmd_lock);
3905 rc = _hwrm_send_message(bp, msg, msg_len, timeout);
3906 mutex_unlock(&bp->hwrm_cmd_lock);
3907 return rc;
3908}
3909
Michael Chan90e209212016-02-26 04:00:08 -05003910int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
3911 int timeout)
3912{
3913 int rc;
3914
3915 mutex_lock(&bp->hwrm_cmd_lock);
3916 rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true);
3917 mutex_unlock(&bp->hwrm_cmd_lock);
3918 return rc;
3919}
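
/* Usage sketch (illustrative only; it mirrors the pattern used throughout
 * this file, e.g. bnxt_hwrm_tunnel_dst_port_alloc()).  The response buffer
 * is shared by all commands, so a caller that reads the response must hold
 * hwrm_cmd_lock across both the send and the read:
 *
 *	struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
 *	struct hwrm_ver_get_input req = {0};
 *	int rc;
 *
 *	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
 *	mutex_lock(&bp->hwrm_cmd_lock);
 *	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
 *	if (!rc)
 *		... read fields of *resp ...
 *	mutex_unlock(&bp->hwrm_cmd_lock);
 *
 * Callers that ignore the response body can use hwrm_send_message(), which
 * takes and releases the lock itself.
 */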
3920
Michael Chana1653b12016-12-07 00:26:20 -05003921int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
3922 int bmap_size)
Michael Chanc0c050c2015-10-22 16:01:17 -04003923{
3924 struct hwrm_func_drv_rgtr_input req = {0};
Michael Chan25be8622016-04-05 14:09:00 -04003925 DECLARE_BITMAP(async_events_bmap, 256);
3926 u32 *events = (u32 *)async_events_bmap;
Michael Chana1653b12016-12-07 00:26:20 -05003927 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04003928
3929 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3930
3931 req.enables =
Michael Chana1653b12016-12-07 00:26:20 -05003932 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
Michael Chanc0c050c2015-10-22 16:01:17 -04003933
Michael Chan25be8622016-04-05 14:09:00 -04003934 memset(async_events_bmap, 0, sizeof(async_events_bmap));
3935 for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++)
3936 __set_bit(bnxt_async_events_arr[i], async_events_bmap);
3937
Michael Chana1653b12016-12-07 00:26:20 -05003938 if (bmap && bmap_size) {
3939 for (i = 0; i < bmap_size; i++) {
3940 if (test_bit(i, bmap))
3941 __set_bit(i, async_events_bmap);
3942 }
3943 }
3944
Michael Chan25be8622016-04-05 14:09:00 -04003945 for (i = 0; i < 8; i++)
3946 req.async_event_fwd[i] |= cpu_to_le32(events[i]);
3947
Michael Chana1653b12016-12-07 00:26:20 -05003948 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3949}
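
/* Example (illustrative only): the 256-bit async_events_bmap is handed to
 * firmware as eight 32-bit words, so on a little-endian host registering
 * for event id 33 sets bit 1 of word 1, i.e. async_event_fwd[1] |= BIT(1).
 */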
3950
3951static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
3952{
Michael Chan25e1acd2018-08-05 16:51:55 -04003953 struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chana1653b12016-12-07 00:26:20 -05003954 struct hwrm_func_drv_rgtr_input req = {0};
Michael Chan25e1acd2018-08-05 16:51:55 -04003955 int rc;
Michael Chana1653b12016-12-07 00:26:20 -05003956
3957 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
3958
3959 req.enables =
3960 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
3961 FUNC_DRV_RGTR_REQ_ENABLES_VER);
3962
Michael Chan11f15ed2016-04-05 14:08:55 -04003963 req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
Michael Chand4f52de02018-03-31 13:54:06 -04003964 req.flags = cpu_to_le32(FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE);
3965 req.ver_maj_8b = DRV_VER_MAJ;
3966 req.ver_min_8b = DRV_VER_MIN;
3967 req.ver_upd_8b = DRV_VER_UPD;
3968 req.ver_maj = cpu_to_le16(DRV_VER_MAJ);
3969 req.ver_min = cpu_to_le16(DRV_VER_MIN);
3970 req.ver_upd = cpu_to_le16(DRV_VER_UPD);
Michael Chanc0c050c2015-10-22 16:01:17 -04003971
3972 if (BNXT_PF(bp)) {
Michael Chan9b0436c2017-07-11 13:05:36 -04003973 u32 data[8];
Michael Chana1653b12016-12-07 00:26:20 -05003974 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04003975
Michael Chan9b0436c2017-07-11 13:05:36 -04003976 memset(data, 0, sizeof(data));
3977 for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) {
3978 u16 cmd = bnxt_vf_req_snif[i];
3979 unsigned int bit, idx;
3980
3981 idx = cmd / 32;
3982 bit = cmd % 32;
3983 data[idx] |= 1 << bit;
3984 }
Michael Chanc0c050c2015-10-22 16:01:17 -04003985
Michael Chande68f5de2015-12-09 19:35:41 -05003986 for (i = 0; i < 8; i++)
3987 req.vf_req_fwd[i] = cpu_to_le32(data[i]);
3988
Michael Chanc0c050c2015-10-22 16:01:17 -04003989 req.enables |=
3990 cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
3991 }
3992
Michael Chan25e1acd2018-08-05 16:51:55 -04003993 mutex_lock(&bp->hwrm_cmd_lock);
3994 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
3995 if (rc)
3996 rc = -EIO;
3997 else if (resp->flags &
3998 cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED))
3999 bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE;
4000 mutex_unlock(&bp->hwrm_cmd_lock);
4001 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04004002}
4003
Jeffrey Huangbe58a0d2015-12-27 18:19:18 -05004004static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp)
4005{
4006 struct hwrm_func_drv_unrgtr_input req = {0};
4007
4008 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1);
4009 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4010}
4011
Michael Chanc0c050c2015-10-22 16:01:17 -04004012static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type)
4013{
4014 int rc;
4015 struct hwrm_tunnel_dst_port_free_input req = {0};
4016
4017 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1);
4018 req.tunnel_type = tunnel_type;
4019
4020 switch (tunnel_type) {
4021 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN:
4022 req.tunnel_dst_port_id = bp->vxlan_fw_dst_port_id;
4023 break;
4024 case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE:
4025 req.tunnel_dst_port_id = bp->nge_fw_dst_port_id;
4026 break;
4027 default:
4028 break;
4029 }
4030
4031 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4032 if (rc)
4033 netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n",
4034 rc);
4035 return rc;
4036}
4037
4038static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port,
4039 u8 tunnel_type)
4040{
4041 int rc;
4042 struct hwrm_tunnel_dst_port_alloc_input req = {0};
4043 struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4044
4045 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1);
4046
4047 req.tunnel_type = tunnel_type;
4048 req.tunnel_dst_port_val = port;
4049
4050 mutex_lock(&bp->hwrm_cmd_lock);
4051 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4052 if (rc) {
4053 netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. rc:%d\n",
4054 rc);
4055 goto err_out;
4056 }
4057
Christophe Jaillet57aac712016-11-22 06:14:40 +01004058 switch (tunnel_type) {
4059 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN:
Michael Chanc0c050c2015-10-22 16:01:17 -04004060 bp->vxlan_fw_dst_port_id = resp->tunnel_dst_port_id;
Christophe Jaillet57aac712016-11-22 06:14:40 +01004061 break;
4062 case TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE:
Michael Chanc0c050c2015-10-22 16:01:17 -04004063 bp->nge_fw_dst_port_id = resp->tunnel_dst_port_id;
Christophe Jaillet57aac712016-11-22 06:14:40 +01004064 break;
4065 default:
4066 break;
4067 }
4068
Michael Chanc0c050c2015-10-22 16:01:17 -04004069err_out:
4070 mutex_unlock(&bp->hwrm_cmd_lock);
4071 return rc;
4072}
4073
4074static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id)
4075{
4076 struct hwrm_cfa_l2_set_rx_mask_input req = {0};
4077 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4078
4079 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1);
Michael Chanc1935542015-12-27 18:19:28 -05004080 req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04004081
4082 req.num_mc_entries = cpu_to_le32(vnic->mc_list_count);
4083 req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping);
4084 req.mask = cpu_to_le32(vnic->rx_mask);
4085 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4086}
4087
4088#ifdef CONFIG_RFS_ACCEL
4089static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp,
4090 struct bnxt_ntuple_filter *fltr)
4091{
4092 struct hwrm_cfa_ntuple_filter_free_input req = {0};
4093
4094 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1);
4095 req.ntuple_filter_id = fltr->filter_id;
4096 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4097}
4098
4099#define BNXT_NTP_FLTR_FLAGS \
4100 (CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_L2_FILTER_ID | \
4101 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE | \
4102 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR | \
4103 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE | \
4104 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | \
4105 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR_MASK | \
4106 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | \
4107 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR_MASK | \
4108 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL | \
4109 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT | \
4110 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_SRC_PORT_MASK | \
4111 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT | \
4112 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_PORT_MASK | \
Michael Chanc1935542015-12-27 18:19:28 -05004113 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_DST_ID)
Michael Chanc0c050c2015-10-22 16:01:17 -04004114
Michael Chan61aad722017-02-12 19:18:14 -05004115#define BNXT_NTP_TUNNEL_FLTR_FLAG \
4116 CFA_NTUPLE_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE
4117
Michael Chanc0c050c2015-10-22 16:01:17 -04004118static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp,
4119 struct bnxt_ntuple_filter *fltr)
4120{
4121 int rc = 0;
4122 struct hwrm_cfa_ntuple_filter_alloc_input req = {0};
4123 struct hwrm_cfa_ntuple_filter_alloc_output *resp =
4124 bp->hwrm_cmd_resp_addr;
4125 struct flow_keys *keys = &fltr->fkeys;
4126 struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1];
4127
4128 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1);
Michael Chana54c4d72016-07-25 12:33:35 -04004129 req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx];
Michael Chanc0c050c2015-10-22 16:01:17 -04004130
4131 req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS);
4132
4133 req.ethertype = htons(ETH_P_IP);
4134 memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN);
Michael Chanc1935542015-12-27 18:19:28 -05004135 req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
Michael Chanc0c050c2015-10-22 16:01:17 -04004136 req.ip_protocol = keys->basic.ip_proto;
4137
Michael Chandda0e742016-12-29 12:13:40 -05004138 if (keys->basic.n_proto == htons(ETH_P_IPV6)) {
4139 int i;
4140
4141 req.ethertype = htons(ETH_P_IPV6);
4142 req.ip_addr_type =
4143 CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6;
4144 *(struct in6_addr *)&req.src_ipaddr[0] =
4145 keys->addrs.v6addrs.src;
4146 *(struct in6_addr *)&req.dst_ipaddr[0] =
4147 keys->addrs.v6addrs.dst;
4148 for (i = 0; i < 4; i++) {
4149 req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4150 req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff);
4151 }
4152 } else {
4153 req.src_ipaddr[0] = keys->addrs.v4addrs.src;
4154 req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4155 req.dst_ipaddr[0] = keys->addrs.v4addrs.dst;
4156 req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff);
4157 }
Michael Chan61aad722017-02-12 19:18:14 -05004158 if (keys->control.flags & FLOW_DIS_ENCAPSULATION) {
4159 req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG);
4160 req.tunnel_type =
4161 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL;
4162 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004163
4164 req.src_port = keys->ports.src;
4165 req.src_port_mask = cpu_to_be16(0xffff);
4166 req.dst_port = keys->ports.dst;
4167 req.dst_port_mask = cpu_to_be16(0xffff);
4168
Michael Chanc1935542015-12-27 18:19:28 -05004169 req.dst_id = cpu_to_le16(vnic->fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04004170 mutex_lock(&bp->hwrm_cmd_lock);
4171 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4172 if (!rc)
4173 fltr->filter_id = resp->ntuple_filter_id;
4174 mutex_unlock(&bp->hwrm_cmd_lock);
4175 return rc;
4176}
4177#endif
4178
4179static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx,
4180 u8 *mac_addr)
4181{
4182 int rc;
4183 struct hwrm_cfa_l2_filter_alloc_input req = {0};
4184 struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
4185
4186 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1);
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04004187 req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX);
4188 if (!BNXT_CHIP_TYPE_NITRO_A0(bp))
4189 req.flags |=
4190 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST);
Michael Chanc1935542015-12-27 18:19:28 -05004191 req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id);
Michael Chanc0c050c2015-10-22 16:01:17 -04004192 req.enables =
4193 cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR |
Michael Chanc1935542015-12-27 18:19:28 -05004194 CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID |
Michael Chanc0c050c2015-10-22 16:01:17 -04004195 CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK);
4196 memcpy(req.l2_addr, mac_addr, ETH_ALEN);
4197 req.l2_addr_mask[0] = 0xff;
4198 req.l2_addr_mask[1] = 0xff;
4199 req.l2_addr_mask[2] = 0xff;
4200 req.l2_addr_mask[3] = 0xff;
4201 req.l2_addr_mask[4] = 0xff;
4202 req.l2_addr_mask[5] = 0xff;
4203
4204 mutex_lock(&bp->hwrm_cmd_lock);
4205 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4206 if (!rc)
4207 bp->vnic_info[vnic_id].fw_l2_filter_id[idx] =
4208 resp->l2_filter_id;
4209 mutex_unlock(&bp->hwrm_cmd_lock);
4210 return rc;
4211}
4212
4213static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp)
4214{
4215 u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */
4216 int rc = 0;
4217
4218 /* Any associated ntuple filters will also be cleared by firmware. */
4219 mutex_lock(&bp->hwrm_cmd_lock);
4220 for (i = 0; i < num_of_vnics; i++) {
4221 struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
4222
4223 for (j = 0; j < vnic->uc_filter_count; j++) {
4224 struct hwrm_cfa_l2_filter_free_input req = {0};
4225
4226 bnxt_hwrm_cmd_hdr_init(bp, &req,
4227 HWRM_CFA_L2_FILTER_FREE, -1, -1);
4228
4229 req.l2_filter_id = vnic->fw_l2_filter_id[j];
4230
4231 rc = _hwrm_send_message(bp, &req, sizeof(req),
4232 HWRM_CMD_TIMEOUT);
4233 }
4234 vnic->uc_filter_count = 0;
4235 }
4236 mutex_unlock(&bp->hwrm_cmd_lock);
4237
4238 return rc;
4239}
4240
4241static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags)
4242{
4243 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4244 struct hwrm_vnic_tpa_cfg_input req = {0};
4245
Michael Chan3c4fe802018-03-09 23:46:10 -05004246 if (vnic->fw_vnic_id == INVALID_HW_RING_ID)
4247 return 0;
4248
Michael Chanc0c050c2015-10-22 16:01:17 -04004249 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1);
4250
4251 if (tpa_flags) {
4252 u16 mss = bp->dev->mtu - 40;
4253 u32 nsegs, n, segs = 0, flags;
4254
4255 flags = VNIC_TPA_CFG_REQ_FLAGS_TPA |
4256 VNIC_TPA_CFG_REQ_FLAGS_ENCAP_TPA |
4257 VNIC_TPA_CFG_REQ_FLAGS_RSC_WND_UPDATE |
4258 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_ECN |
4259 VNIC_TPA_CFG_REQ_FLAGS_AGG_WITH_SAME_GRE_SEQ;
4260 if (tpa_flags & BNXT_FLAG_GRO)
4261 flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO;
4262
4263 req.flags = cpu_to_le32(flags);
4264
4265 req.enables =
4266 cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS |
Michael Chanc1935542015-12-27 18:19:28 -05004267 VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS |
4268 VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN);
Michael Chanc0c050c2015-10-22 16:01:17 -04004269
4270 /* Number of segs is in log2 units, and the first packet is not
4271 * counted in these units.
4272 */
Michael Chan2839f282016-04-25 02:30:50 -04004273 if (mss <= BNXT_RX_PAGE_SIZE) {
4274 n = BNXT_RX_PAGE_SIZE / mss;
Michael Chanc0c050c2015-10-22 16:01:17 -04004275 nsegs = (MAX_SKB_FRAGS - 1) * n;
4276 } else {
Michael Chan2839f282016-04-25 02:30:50 -04004277 n = mss / BNXT_RX_PAGE_SIZE;
4278 if (mss & (BNXT_RX_PAGE_SIZE - 1))
Michael Chanc0c050c2015-10-22 16:01:17 -04004279 n++;
4280 nsegs = (MAX_SKB_FRAGS - n) / n;
4281 }
4282
4283 segs = ilog2(nsegs);
4284 req.max_agg_segs = cpu_to_le16(segs);
4285 req.max_aggs = cpu_to_le16(VNIC_TPA_CFG_REQ_MAX_AGGS_MAX);
Michael Chanc1935542015-12-27 18:19:28 -05004286
4287 req.min_agg_len = cpu_to_le32(512);
Michael Chanc0c050c2015-10-22 16:01:17 -04004288 }
4289 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4290
4291 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4292}
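
/* Worked example (illustrative only, assuming 4K BNXT_RX_PAGE_SIZE and
 * MAX_SKB_FRAGS = 17): mtu = 1500 gives mss = 1460 <= 4096, so
 * n = 4096 / 1460 = 2 segments per aggregation page,
 * nsegs = (17 - 1) * 2 = 32, and max_agg_segs = ilog2(32) = 5, i.e. up to
 * 2^5 TPA segments per LRO/GRO aggregate.
 */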
4293
Michael Chan2c61d212018-10-14 07:02:50 -04004294static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring)
4295{
4296 struct bnxt_ring_grp_info *grp_info;
4297
4298 grp_info = &bp->grp_info[ring->grp_idx];
4299 return grp_info->cp_fw_ring_id;
4300}
4301
4302static u16 bnxt_cp_ring_for_rx(struct bnxt *bp, struct bnxt_rx_ring_info *rxr)
4303{
4304 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4305 struct bnxt_napi *bnapi = rxr->bnapi;
4306 struct bnxt_cp_ring_info *cpr;
4307
4308 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_RX_HDL];
4309 return cpr->cp_ring_struct.fw_ring_id;
4310 } else {
4311 return bnxt_cp_ring_from_grp(bp, &rxr->rx_ring_struct);
4312 }
4313}
4314
4315static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr)
4316{
4317 if (bp->flags & BNXT_FLAG_CHIP_P5) {
4318 struct bnxt_napi *bnapi = txr->bnapi;
4319 struct bnxt_cp_ring_info *cpr;
4320
4321 cpr = bnapi->cp_ring.cp_ring_arr[BNXT_TX_HDL];
4322 return cpr->cp_ring_struct.fw_ring_id;
4323 } else {
4324 return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct);
4325 }
4326}
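
/* On P5 chips each bnapi owns a notification queue whose cp_ring_arr[]
 * children carry the actual RX/TX completions, so the two helpers above
 * return the BNXT_RX_HDL/BNXT_TX_HDL child's firmware ring id; legacy
 * chips map through the ring group instead.
 */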
4327
Michael Chanc0c050c2015-10-22 16:01:17 -04004328static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss)
4329{
4330 u32 i, j, max_rings;
4331 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4332 struct hwrm_vnic_rss_cfg_input req = {0};
4333
Michael Chan7b3af4f2018-10-14 07:02:54 -04004334 if ((bp->flags & BNXT_FLAG_CHIP_P5) ||
4335 vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID)
Michael Chanc0c050c2015-10-22 16:01:17 -04004336 return 0;
4337
4338 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4339 if (set_rss) {
Michael Chan87da7f72016-11-16 21:13:09 -05004340 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
Michael Chan50f011b2018-08-05 16:51:51 -04004341 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04004342 if (vnic->flags & BNXT_VNIC_RSS_FLAG) {
4343 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
4344 max_rings = bp->rx_nr_rings - 1;
4345 else
4346 max_rings = bp->rx_nr_rings;
4347 } else {
Michael Chanc0c050c2015-10-22 16:01:17 -04004348 max_rings = 1;
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04004349 }
Michael Chanc0c050c2015-10-22 16:01:17 -04004350
4351 /* Fill the RSS indirection table with ring group ids */
4352 for (i = 0, j = 0; i < HW_HASH_INDEX_SIZE; i++, j++) {
4353 if (j == max_rings)
4354 j = 0;
4355 vnic->rss_table[i] = cpu_to_le16(vnic->fw_grp_ids[j]);
4356 }
4357
4358 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4359 req.hash_key_tbl_addr =
4360 cpu_to_le64(vnic->rss_hash_key_dma_addr);
4361 }
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04004362 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
Michael Chanc0c050c2015-10-22 16:01:17 -04004363 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4364}
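
/* Example (illustrative only): with max_rings = 4, the loop above fills the
 * HW_HASH_INDEX_SIZE-entry indirection table with ring group ids in
 * round-robin order (grp[0], grp[1], grp[2], grp[3], grp[0], ...), so
 * RSS hash buckets spread evenly across the RX rings.
 */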
4365
Michael Chan7b3af4f2018-10-14 07:02:54 -04004366static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss)
4367{
4368 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
4369 u32 i, j, k, nr_ctxs, max_rings = bp->rx_nr_rings;
4370 struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
4371 struct hwrm_vnic_rss_cfg_input req = {0};
4372
4373 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1);
4374 req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
4375 if (!set_rss) {
4376 hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4377 return 0;
4378 }
4379 req.hash_type = cpu_to_le32(bp->rss_hash_cfg);
4380 req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT;
4381 req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr);
4382 req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr);
4383 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
4384 for (i = 0, k = 0; i < nr_ctxs; i++) {
4385 __le16 *ring_tbl = vnic->rss_table;
4386 int rc;
4387
4388 req.ring_table_pair_index = i;
4389 req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]);
4390 for (j = 0; j < 64; j++) {
4391 u16 ring_id;
4392
4393 ring_id = rxr->rx_ring_struct.fw_ring_id;
4394 *ring_tbl++ = cpu_to_le16(ring_id);
4395 ring_id = bnxt_cp_ring_for_rx(bp, rxr);
4396 *ring_tbl++ = cpu_to_le16(ring_id);
4397 rxr++;
4398 k++;
4399 if (k == max_rings) {
4400 k = 0;
4401 rxr = &bp->rx_ring[0];
4402 }
4403 }
4404 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
4405 if (rc)
4406 return -EIO;
4407 }
4408 return 0;
4409}
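
/* Example (illustrative only): on P5 chips each RSS context holds 64
 * (rx ring, companion cp ring) id pairs, so a VNIC spanning 100 RX rings
 * needs nr_ctxs = DIV_ROUND_UP(100, 64) = 2 contexts; the second context's
 * table wraps back to rx_ring[0] once k reaches max_rings.
 */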
4410
static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id)
{
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_plcmodes_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1);
	req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 |
				VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6);
	req.enables =
		cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID |
			    VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID);
	/* thresholds not implemented in firmware yet */
	req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh);
	req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh);
	req.vnic_id = cpu_to_le32(vnic->fw_vnic_id);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id,
					u16 ctx_idx)
{
	struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1);
	req.rss_cos_lb_ctx_id =
		cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]);

	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID;
}

static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp)
{
	int i, j;

	for (i = 0; i < bp->nr_vnics; i++) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		for (j = 0; j < BNXT_MAX_CTX_PER_VNIC; j++) {
			if (vnic->fw_rss_cos_lb_ctx[j] != INVALID_HW_RING_ID)
				bnxt_hwrm_vnic_ctx_free_one(bp, i, j);
		}
	}
	bp->rsscos_nr_ctxs = 0;
}

static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx)
{
	int rc;
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0};
	struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1,
			       -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] =
			le16_to_cpu(resp->rss_cos_lb_ctx_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp)
{
	if (bp->flags & BNXT_FLAG_ROCE_MIRROR_CAP)
		return VNIC_CFG_REQ_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_MODE;
	return VNIC_CFG_REQ_FLAGS_ROCE_DUAL_VNIC_MODE;
}

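/* Configure a VNIC: default RX/completion rings on P5 chips, or the
 * default ring group and RSS/COS/LB rules on legacy chips, plus the MRU
 * sized for the current MTU, VLAN strip mode, and the RoCE VNIC mode on
 * the default VNIC.
 */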
int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id)
{
	unsigned int ring = 0, grp_idx;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
	struct hwrm_vnic_cfg_input req = {0};
	u16 def_vlan = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1);

	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];

		req.default_rx_ring_id =
			cpu_to_le16(rxr->rx_ring_struct.fw_ring_id);
		req.default_cmpl_ring_id =
			cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr));
		req.enables =
			cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID |
				    VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID);
		goto vnic_mru;
	}
	req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP);
	/* Only RSS support for now TBD: COS & LB */
	if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) {
		req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
	} else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) {
		req.rss_rule =
			cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE |
					   VNIC_CFG_REQ_ENABLES_MRU);
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE);
	} else {
		req.rss_rule = cpu_to_le16(0xffff);
	}

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) &&
	    (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) {
		req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]);
		req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE);
	} else {
		req.cos_rule = cpu_to_le16(0xffff);
	}

	if (vnic->flags & BNXT_VNIC_RSS_FLAG)
		ring = 0;
	else if (vnic->flags & BNXT_VNIC_RFS_FLAG)
		ring = vnic_id - 1;
	else if ((vnic_id == 1) && BNXT_CHIP_TYPE_NITRO_A0(bp))
		ring = bp->rx_nr_rings - 1;

	grp_idx = bp->rx_ring[ring].bnapi->index;
	req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id);
	req.lb_rule = cpu_to_le16(0xffff);
vnic_mru:
	req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + ETH_FCS_LEN +
			      VLAN_HLEN);

	req.vnic_id = cpu_to_le16(vnic->fw_vnic_id);
#ifdef CONFIG_BNXT_SRIOV
	if (BNXT_VF(bp))
		def_vlan = bp->vf.vlan;
#endif
	if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan)
		req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE);
	if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP))
		req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp));

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

static int bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id)
{
	int rc = 0;

	if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) {
		struct hwrm_vnic_free_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1);
		req.vnic_id =
			cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id);

		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
		if (rc)
			return rc;
		bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID;
	}
	return rc;
}

static void bnxt_hwrm_vnic_free(struct bnxt *bp)
{
	u16 i;

	for (i = 0; i < bp->nr_vnics; i++)
		bnxt_hwrm_vnic_free_one(bp, i);
}

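/* Allocate a VNIC in firmware.  On legacy chips, first map the requested
 * RX ring groups to this VNIC; P5 chips have no ring groups.
 */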
static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id,
				unsigned int start_rx_ring_idx,
				unsigned int nr_rings)
{
	int rc = 0;
	unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings;
	struct hwrm_vnic_alloc_input req = {0};
	struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		goto vnic_no_ring_grps;

	/* map ring groups to this vnic */
	for (i = start_rx_ring_idx, j = 0; i < end_idx; i++, j++) {
		grp_idx = bp->rx_ring[i].bnapi->index;
		if (bp->grp_info[grp_idx].fw_grp_id == INVALID_HW_RING_ID) {
			netdev_err(bp->dev, "Not enough ring groups avail:%x req:%x\n",
				   j, nr_rings);
			break;
		}
		vnic->fw_grp_ids[j] = bp->grp_info[grp_idx].fw_grp_id;
	}

vnic_no_ring_grps:
	for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++)
		vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID;
	if (vnic_id == 0)
		req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT);

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id);
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

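/* Query VNIC capabilities once at init: the new RSS capability (legacy
 * chips only) and RoCE mirroring support are cached in bp->flags.
 */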
static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp)
{
	struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_vnic_qcaps_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10600)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		u32 flags = le32_to_cpu(resp->flags);

		if (!(bp->flags & BNXT_FLAG_CHIP_P5) &&
		    (flags & VNIC_QCAPS_RESP_FLAGS_RSS_DFLT_CR_CAP))
			bp->flags |= BNXT_FLAG_NEW_RSS_CAP;
		if (flags &
		    VNIC_QCAPS_RESP_FLAGS_ROCE_MIRRORING_CAPABLE_VNIC_CAP)
			bp->flags |= BNXT_FLAG_ROCE_MIRROR_CAP;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

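/* Allocate one firmware ring group (completion, RX, aggregation rings and
 * stats context) per RX ring.  P5 chips do not use ring groups.
 */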
static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp)
{
	u16 i;
	int rc = 0;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		return 0;

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct hwrm_ring_grp_alloc_input req = {0};
		struct hwrm_ring_grp_alloc_output *resp =
					bp->hwrm_cmd_resp_addr;
		unsigned int grp_idx = bp->rx_ring[i].bnapi->index;

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1);

		req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id);
		req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id);
		req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id);
		req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;

		bp->grp_info[grp_idx].fw_grp_id =
			le32_to_cpu(resp->ring_group_id);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

static int bnxt_hwrm_ring_grp_free(struct bnxt *bp)
{
	u16 i;
	int rc = 0;
	struct hwrm_ring_grp_free_input req = {0};

	if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	for (i = 0; i < bp->cp_nr_rings; i++) {
		if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID)
			continue;
		req.ring_group_id =
			cpu_to_le32(bp->grp_info[i].fw_grp_id);

		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (rc)
			break;
		bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

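/* Send a HWRM_RING_ALLOC request for one ring.  The request layout depends
 * on the ring type and on whether the chip is P5: TX rings bind to a
 * completion ring, RX/AGG rings carry buffer sizes and stats contexts on
 * P5, and completion rings bind to an NQ on P5 or to MSI-X otherwise.
 */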
static int hwrm_ring_alloc_send_msg(struct bnxt *bp,
				    struct bnxt_ring_struct *ring,
				    u32 ring_type, u32 map_index)
{
	int rc = 0, err = 0;
	struct hwrm_ring_alloc_input req = {0};
	struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_ring_mem_info *rmem = &ring->ring_mem;
	struct bnxt_ring_grp_info *grp_info;
	u16 ring_id;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1);

	req.enables = 0;
	if (rmem->nr_pages > 1) {
		req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map);
		/* Page size is in log2 units */
		req.page_size = BNXT_PAGE_SHIFT;
		req.page_tbl_depth = 1;
	} else {
		req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]);
	}
	req.fbo = 0;
	/* Association of ring index with doorbell index and MSIX number */
	req.logical_id = cpu_to_le16(map_index);

	switch (ring_type) {
	case HWRM_RING_ALLOC_TX: {
		struct bnxt_tx_ring_info *txr;

		txr = container_of(ring, struct bnxt_tx_ring_info,
				   tx_ring_struct);
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX;
		/* Association of transmit ring with completion ring */
		grp_info = &bp->grp_info[ring->grp_idx];
		req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr));
		req.length = cpu_to_le32(bp->tx_ring_mask + 1);
		req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
		req.queue_id = cpu_to_le16(ring->queue_id);
		break;
	}
	case HWRM_RING_ALLOC_RX:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		req.length = cpu_to_le32(bp->rx_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			u16 flags = 0;

			/* Association of rx ring with stats context */
			grp_info = &bp->grp_info[ring->grp_idx];
			req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size);
			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
			if (NET_IP_ALIGN == 2)
				flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD;
			req.flags = cpu_to_le16(flags);
		}
		break;
	case HWRM_RING_ALLOC_AGG:
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG;
			/* Association of agg ring with rx ring */
			grp_info = &bp->grp_info[ring->grp_idx];
			req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id);
			req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE);
			req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID |
				RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID);
		} else {
			req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX;
		}
		req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1);
		break;
	case HWRM_RING_ALLOC_CMPL:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			/* Association of cp ring with nq */
			grp_info = &bp->grp_info[map_index];
			req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id);
			req.cq_handle = cpu_to_le64(ring->handle);
			req.enables |= cpu_to_le32(
				RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID);
		} else if (bp->flags & BNXT_FLAG_USING_MSIX) {
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		}
		break;
	case HWRM_RING_ALLOC_NQ:
		req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ;
		req.length = cpu_to_le32(bp->cp_ring_mask + 1);
		if (bp->flags & BNXT_FLAG_USING_MSIX)
			req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX;
		break;
	default:
		netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n",
			   ring_type);
		return -1;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	err = le16_to_cpu(resp->error_code);
	ring_id = le16_to_cpu(resp->ring_id);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || err) {
		netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n",
			   ring_type, rc, err);
		return -EIO;
	}
	ring->fw_ring_id = ring_id;
	return rc;
}

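/* Point firmware async event notifications at the given completion ring,
 * using HWRM_FUNC_CFG on the PF and HWRM_FUNC_VF_CFG on a VF.
 */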
static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx)
{
	int rc;

	if (BNXT_PF(bp)) {
		struct hwrm_func_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
		req.fid = cpu_to_le16(0xffff);
		req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	} else {
		struct hwrm_func_vf_cfg_input req = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1);
		req.enables =
			cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR);
		req.async_event_cr = cpu_to_le16(idx);
		rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	}
	return rc;
}

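/* Compute the doorbell address and key for a newly allocated ring.  P5
 * chips use 64-bit doorbells at a fixed BAR offset (PF and VF offsets
 * differ); legacy chips use 32-bit doorbells spaced 0x80 apart per MSI-X
 * vector.
 */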
static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type,
			u32 map_idx, u32 xid)
{
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		if (BNXT_PF(bp))
			db->doorbell = bp->bar1 + 0x10000;
		else
			db->doorbell = bp->bar1 + 0x4000;
		switch (ring_type) {
		case HWRM_RING_ALLOC_TX:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SQ;
			break;
		case HWRM_RING_ALLOC_RX:
		case HWRM_RING_ALLOC_AGG:
			db->db_key64 = DBR_PATH_L2 | DBR_TYPE_SRQ;
			break;
		case HWRM_RING_ALLOC_CMPL:
			db->db_key64 = DBR_PATH_L2;
			break;
		case HWRM_RING_ALLOC_NQ:
			db->db_key64 = DBR_PATH_L2;
			break;
		}
		db->db_key64 |= (u64)xid << DBR_XID_SFT;
	} else {
		db->doorbell = bp->bar1 + map_idx * 0x80;
		switch (ring_type) {
		case HWRM_RING_ALLOC_TX:
			db->db_key32 = DB_KEY_TX;
			break;
		case HWRM_RING_ALLOC_RX:
		case HWRM_RING_ALLOC_AGG:
			db->db_key32 = DB_KEY_RX;
			break;
		case HWRM_RING_ALLOC_CMPL:
			db->db_key32 = DB_KEY_CP;
			break;
		}
	}
}

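/* Allocate all firmware rings in dependency order: NQs (or completion
 * rings on legacy chips) first, then TX rings with their P5 TX completion
 * rings, then RX rings with their P5 RX completion rings, and finally the
 * aggregation rings.  The first completion ring also receives firmware
 * async events.
 */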
static int bnxt_hwrm_ring_alloc(struct bnxt *bp)
{
	int i, rc = 0;
	u32 type;

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = HWRM_RING_ALLOC_NQ;
	else
		type = HWRM_RING_ALLOC_CMPL;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring = &cpr->cp_ring_struct;
		u32 map_idx = ring->map_idx;

		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &cpr->cp_db, type, map_idx, ring->fw_ring_id);
		bnxt_db_nq(bp, &cpr->cp_db, cpr->cp_raw_cons);
		bp->grp_info[i].cp_fw_ring_id = ring->fw_ring_id;

		if (!i) {
			rc = bnxt_hwrm_set_async_event_cr(bp, ring->fw_ring_id);
			if (rc)
				netdev_warn(bp->dev, "Failed to set async event completion ring.\n");
		}
	}

	type = HWRM_RING_ALLOC_TX;
	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring;
		u32 map_idx;

		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			struct bnxt_napi *bnapi = txr->bnapi;
			struct bnxt_cp_ring_info *cpr, *cpr2;
			u32 type2 = HWRM_RING_ALLOC_CMPL;

			cpr = &bnapi->cp_ring;
			cpr2 = cpr->cp_ring_arr[BNXT_TX_HDL];
			ring = &cpr2->cp_ring_struct;
			ring->handle = BNXT_TX_HDL;
			map_idx = bnapi->index;
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
			if (rc)
				goto err_out;
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
		}
		ring = &txr->tx_ring_struct;
		map_idx = i;
		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id);
	}

	type = HWRM_RING_ALLOC_RX;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		struct bnxt_napi *bnapi = rxr->bnapi;
		u32 map_idx = bnapi->index;

		rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
		if (rc)
			goto err_out;
		bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id);
		bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod);
		bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
			u32 type2 = HWRM_RING_ALLOC_CMPL;
			struct bnxt_cp_ring_info *cpr2;

			cpr2 = cpr->cp_ring_arr[BNXT_RX_HDL];
			ring = &cpr2->cp_ring_struct;
			ring->handle = BNXT_RX_HDL;
			rc = hwrm_ring_alloc_send_msg(bp, ring, type2, map_idx);
			if (rc)
				goto err_out;
			bnxt_set_db(bp, &cpr2->cp_db, type2, map_idx,
				    ring->fw_ring_id);
			bnxt_db_cq(bp, &cpr2->cp_db, cpr2->cp_raw_cons);
		}
	}

	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		type = HWRM_RING_ALLOC_AGG;
		for (i = 0; i < bp->rx_nr_rings; i++) {
			struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
			struct bnxt_ring_struct *ring =
						&rxr->rx_agg_ring_struct;
			u32 grp_idx = ring->grp_idx;
			u32 map_idx = grp_idx + bp->rx_nr_rings;

			rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx);
			if (rc)
				goto err_out;

			bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx,
				    ring->fw_ring_id);
			bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod);
			bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id;
		}
	}
err_out:
	return rc;
}

static int hwrm_ring_free_send_msg(struct bnxt *bp,
				   struct bnxt_ring_struct *ring,
				   u32 ring_type, int cmpl_ring_id)
{
	int rc;
	struct hwrm_ring_free_input req = {0};
	struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr;
	u16 error_code;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1);
	req.ring_type = ring_type;
	req.ring_id = cpu_to_le16(ring->fw_ring_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	error_code = le16_to_cpu(resp->error_code);
	mutex_unlock(&bp->hwrm_cmd_lock);

	if (rc || error_code) {
		netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n",
			   ring_type, rc, error_code);
		return -EIO;
	}
	return 0;
}

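/* Free all firmware rings in the reverse order of allocation: TX, RX and
 * aggregation rings first (flushed through their completion ring when
 * close_path is set), then the completion/NQ rings after interrupts have
 * been disabled.
 */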
static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path)
{
	u32 type;
	int i;

	if (!bp->bnapi)
		return;

	for (i = 0; i < bp->tx_nr_rings; i++) {
		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
		struct bnxt_ring_struct *ring = &txr->tx_ring_struct;
		u32 cmpl_ring_id;

		cmpl_ring_id = bnxt_cp_ring_for_tx(bp, txr);
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_TX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
		}
	}

	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id;

		cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring,
						RING_FREE_REQ_RING_TYPE_RX,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].rx_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = RING_FREE_REQ_RING_TYPE_RX_AGG;
	else
		type = RING_FREE_REQ_RING_TYPE_RX;
	for (i = 0; i < bp->rx_nr_rings; i++) {
		struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i];
		struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct;
		u32 grp_idx = rxr->bnapi->index;
		u32 cmpl_ring_id;

		cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr);
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring, type,
						close_path ? cmpl_ring_id :
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[grp_idx].agg_fw_ring_id =
				INVALID_HW_RING_ID;
		}
	}

	/* The completion rings are about to be freed. After that the
	 * IRQ doorbell will not work anymore. So we need to disable
	 * IRQ here.
	 */
	bnxt_disable_int_sync(bp);

	if (bp->flags & BNXT_FLAG_CHIP_P5)
		type = RING_FREE_REQ_RING_TYPE_NQ;
	else
		type = RING_FREE_REQ_RING_TYPE_L2_CMPL;
	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		struct bnxt_ring_struct *ring;
		int j;

		for (j = 0; j < 2; j++) {
			struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];

			if (cpr2) {
				ring = &cpr2->cp_ring_struct;
				if (ring->fw_ring_id == INVALID_HW_RING_ID)
					continue;
				hwrm_ring_free_send_msg(bp, ring,
					RING_FREE_REQ_RING_TYPE_L2_CMPL,
					INVALID_HW_RING_ID);
				ring->fw_ring_id = INVALID_HW_RING_ID;
			}
		}
		ring = &cpr->cp_ring_struct;
		if (ring->fw_ring_id != INVALID_HW_RING_ID) {
			hwrm_ring_free_send_msg(bp, ring, type,
						INVALID_HW_RING_ID);
			ring->fw_ring_id = INVALID_HW_RING_ID;
			bp->grp_info[i].cp_fw_ring_id = INVALID_HW_RING_ID;
		}
	}
}

static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
			   bool shared);

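/* Refresh the cached counts of resources currently reserved for this
 * function.  On P5 chips the RX/TX reservations are re-trimmed to fit the
 * reserved completion rings, and the ring group count tracks the RX rings.
 */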
static int bnxt_hwrm_get_rings(struct bnxt *bp)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(0xffff);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc) {
		mutex_unlock(&bp->hwrm_cmd_lock);
		return -EIO;
	}

	hw_resc->resv_tx_rings = le16_to_cpu(resp->alloc_tx_rings);
	if (BNXT_NEW_RM(bp)) {
		u16 cp, stats;

		hw_resc->resv_rx_rings = le16_to_cpu(resp->alloc_rx_rings);
		hw_resc->resv_hw_ring_grps =
			le32_to_cpu(resp->alloc_hw_ring_grps);
		hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics);
		cp = le16_to_cpu(resp->alloc_cmpl_rings);
		stats = le16_to_cpu(resp->alloc_stat_ctx);
		cp = min_t(u16, cp, stats);
		hw_resc->resv_irqs = cp;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			int rx = hw_resc->resv_rx_rings;
			int tx = hw_resc->resv_tx_rings;

			if (bp->flags & BNXT_FLAG_AGG_RINGS)
				rx >>= 1;
			if (cp < (rx + tx)) {
				bnxt_trim_rings(bp, &rx, &tx, cp, false);
				if (bp->flags & BNXT_FLAG_AGG_RINGS)
					rx <<= 1;
				hw_resc->resv_rx_rings = rx;
				hw_resc->resv_tx_rings = tx;
			}
			hw_resc->resv_irqs = le16_to_cpu(resp->alloc_msix);
			hw_resc->resv_hw_ring_grps = rx;
		}
		hw_resc->resv_cp_rings = cp;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return 0;
}

/* Caller must hold bp->hwrm_cmd_lock */
int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
{
	struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_func_qcfg_input req = {0};
	int rc;

	if (bp->hwrm_spec_code < 0x10601)
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
	req.fid = cpu_to_le16(fid);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*tx_rings = le16_to_cpu(resp->alloc_tx_rings);

	return rc;
}

static bool bnxt_rfs_supported(struct bnxt *bp);

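/* Build a FUNC_CFG reservation request for the PF.  Each resource field is
 * only enabled when something is requested for it; P5 chips reserve MSI-X
 * vectors and one RSS context per 64 ring groups instead of ring groups.
 */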
static void
__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
			     int tx_rings, int rx_rings, int ring_grps,
			     int cp_rings, int vnics)
{
	u32 enables = 0;

	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1);
	req->fid = cpu_to_le16(0xffff);
	enables |= tx_rings ? FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	req->num_tx_rings = cpu_to_le16(tx_rings);
	if (BNXT_NEW_RM(bp)) {
		enables |= rx_rings ? FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS : 0;
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			enables |= cp_rings ? FUNC_CFG_REQ_ENABLES_NUM_MSIX : 0;
			enables |= tx_rings + ring_grps ?
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
			enables |= rx_rings ?
				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
		} else {
			enables |= cp_rings ?
				   FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
				   FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
			enables |= ring_grps ?
				   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS |
				   FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
		}
		enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;

		req->num_rx_rings = cpu_to_le16(rx_rings);
		if (bp->flags & BNXT_FLAG_CHIP_P5) {
			req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
			req->num_msix = cpu_to_le16(cp_rings);
			req->num_rsscos_ctxs =
				cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
		} else {
			req->num_cmpl_rings = cpu_to_le16(cp_rings);
			req->num_hw_ring_grps = cpu_to_le16(ring_grps);
			req->num_rsscos_ctxs = cpu_to_le16(1);
			if (!(bp->flags & BNXT_FLAG_NEW_RSS_CAP) &&
			    bnxt_rfs_supported(bp))
				req->num_rsscos_ctxs =
					cpu_to_le16(ring_grps + 1);
		}
		req->num_stat_ctxs = req->num_cmpl_rings;
		req->num_vnics = cpu_to_le16(vnics);
	}
	req->enables = cpu_to_le32(enables);
}

static void
__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp,
			     struct hwrm_func_vf_cfg_input *req, int tx_rings,
			     int rx_rings, int ring_grps, int cp_rings,
			     int vnics)
{
	u32 enables = 0;

	bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1);
	enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0;
	enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS |
			      FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0;
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		enables |= tx_rings + ring_grps ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
			   FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
	} else {
		enables |= cp_rings ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_CMPL_RINGS |
			   FUNC_VF_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
		enables |= ring_grps ?
			   FUNC_VF_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
	}
	enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
	enables |= FUNC_VF_CFG_REQ_ENABLES_NUM_L2_CTXS;

	req->num_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX);
	req->num_tx_rings = cpu_to_le16(tx_rings);
	req->num_rx_rings = cpu_to_le16(rx_rings);
	if (bp->flags & BNXT_FLAG_CHIP_P5) {
		req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
		req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
	} else {
		req->num_cmpl_rings = cpu_to_le16(cp_rings);
		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
		req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX);
	}
	req->num_stat_ctxs = req->num_cmpl_rings;
	req->num_vnics = cpu_to_le16(vnics);

	req->enables = cpu_to_le32(enables);
}

static int
bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
			   int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_cfg_input req = {0};
	int rc;

	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	if (!req.enables)
		return 0;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;

	if (bp->hwrm_spec_code < 0x10601)
		bp->hw_resc.resv_tx_rings = tx_rings;

	rc = bnxt_hwrm_get_rings(bp);
	return rc;
}

static int
bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
			   int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_vf_cfg_input req = {0};
	int rc;

	if (!BNXT_NEW_RM(bp)) {
		bp->hw_resc.resv_tx_rings = tx_rings;
		return 0;
	}

	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;

	rc = bnxt_hwrm_get_rings(bp);
	return rc;
}

static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp,
				   int cp, int vnic)
{
	if (BNXT_PF(bp))
		return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic);
	else
		return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic);
}

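/* Count the completion rings in use, including any MSI-X vectors set
 * aside for the RDMA (ULP) driver above the L2 rings.
 */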
static int bnxt_cp_rings_in_use(struct bnxt *bp)
{
	int cp = bp->cp_nr_rings;
	int ulp_msix, ulp_base;

	ulp_msix = bnxt_get_ulp_msix_num(bp);
	if (ulp_msix) {
		ulp_base = bnxt_get_ulp_msix_base(bp);
		cp += ulp_msix;
		if ((ulp_base + ulp_msix) > cp)
			cp = ulp_base + ulp_msix;
	}
	return cp;
}

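/* Return true if the current firmware reservations no longer match what
 * the driver needs, e.g. after a ring count change.
 */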
static bool bnxt_need_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int cp = bnxt_cp_rings_in_use(bp);
	int rx = bp->rx_nr_rings;
	int vnic = 1, grp = rx;

	if (bp->hwrm_spec_code < 0x10601)
		return false;

	if (hw_resc->resv_tx_rings != bp->tx_nr_rings)
		return true;

	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	if (BNXT_NEW_RM(bp) &&
	    (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp ||
	     hw_resc->resv_vnics != vnic ||
	     (hw_resc->resv_hw_ring_grps != grp &&
	      !(bp->flags & BNXT_FLAG_CHIP_P5))))
		return true;
	return false;
}

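/* Reserve rings with firmware and adjust the driver's ring counts to
 * whatever was actually granted.  If too few RX rings remain to support
 * aggregation, LRO and the aggregation rings are turned off.
 */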
static int __bnxt_reserve_rings(struct bnxt *bp)
{
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int cp = bnxt_cp_rings_in_use(bp);
	int tx = bp->tx_nr_rings;
	int rx = bp->rx_nr_rings;
	int grp, rx_rings, rc;
	bool sh = false;
	int vnic = 1;

	if (!bnxt_need_reserve_rings(bp))
		return 0;

	if (bp->flags & BNXT_FLAG_SHARED_RINGS)
		sh = true;
	if ((bp->flags & BNXT_FLAG_RFS) && !(bp->flags & BNXT_FLAG_CHIP_P5))
		vnic = rx + 1;
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx <<= 1;
	grp = bp->rx_nr_rings;

	rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic);
	if (rc)
		return rc;

	tx = hw_resc->resv_tx_rings;
	if (BNXT_NEW_RM(bp)) {
		rx = hw_resc->resv_rx_rings;
		cp = hw_resc->resv_cp_rings;
		grp = hw_resc->resv_hw_ring_grps;
		vnic = hw_resc->resv_vnics;
	}

	rx_rings = rx;
	if (bp->flags & BNXT_FLAG_AGG_RINGS) {
		if (rx >= 2) {
			rx_rings = rx >> 1;
		} else {
			if (netif_running(bp->dev))
				return -ENOMEM;

			bp->flags &= ~BNXT_FLAG_AGG_RINGS;
			bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
			bp->dev->hw_features &= ~NETIF_F_LRO;
			bp->dev->features &= ~NETIF_F_LRO;
			bnxt_set_ring_params(bp);
		}
	}
	rx_rings = min_t(int, rx_rings, grp);
	rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh);
	if (bp->flags & BNXT_FLAG_AGG_RINGS)
		rx = rx_rings << 1;
	cp = sh ? max_t(int, tx, rx_rings) : tx + rx_rings;
	bp->tx_nr_rings = tx;
	bp->rx_nr_rings = rx_rings;
	bp->cp_nr_rings = cp;

	if (!tx || !rx || !cp || !grp || !vnic)
		return -ENOMEM;

	return rc;
}

static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_vf_cfg_input req = {0};
	u32 flags;
	int rc;

	if (!BNXT_NEW_RM(bp))
		return 0;

	__bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_VNIC_ASSETS_TEST |
		FUNC_VF_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
	if (!(bp->flags & BNXT_FLAG_CHIP_P5))
		flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;

	req.flags = cpu_to_le32(flags);
	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;
	return 0;
}

static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				    int ring_grps, int cp_rings, int vnics)
{
	struct hwrm_func_cfg_input req = {0};
	u32 flags;
	int rc;

	__bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps,
				     cp_rings, vnics);
	flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST;
	if (BNXT_NEW_RM(bp)) {
		flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_CMPL_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_STAT_CTX_ASSETS_TEST |
			 FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST;
		if (bp->flags & BNXT_FLAG_CHIP_P5)
			flags |= FUNC_CFG_REQ_FLAGS_RSSCOS_CTX_ASSETS_TEST;
		else
			flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST;
	}

	req.flags = cpu_to_le32(flags);
	rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		return -ENOMEM;
	return 0;
}

static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings,
				 int ring_grps, int cp_rings, int vnics)
{
	if (bp->hwrm_spec_code < 0x10801)
		return 0;

	if (BNXT_PF(bp))
		return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings,
						ring_grps, cp_rings, vnics);

	return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps,
					cp_rings, vnics);
}

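/* Query interrupt coalescing limits from firmware.  Older firmware
 * (pre-1.9.2 spec) does not support the query, so start from conservative
 * legacy defaults and only overwrite them on success.
 */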
static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp)
{
	struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	struct hwrm_ring_aggint_qcaps_input req = {0};
	int rc;

	coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS;
	coal_cap->num_cmpl_dma_aggr_max = 63;
	coal_cap->num_cmpl_dma_aggr_during_int_max = 63;
	coal_cap->cmpl_aggr_dma_tmr_max = 65535;
	coal_cap->cmpl_aggr_dma_tmr_during_int_max = 65535;
	coal_cap->int_lat_tmr_min_max = 65535;
	coal_cap->int_lat_tmr_max_max = 65535;
	coal_cap->num_cmpl_aggr_int_max = 65535;
	coal_cap->timer_units = 80;

	if (bp->hwrm_spec_code < 0x10902)
		return;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params);
		coal_cap->nq_params = le32_to_cpu(resp->nq_params);
		coal_cap->num_cmpl_dma_aggr_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_max);
		coal_cap->num_cmpl_dma_aggr_during_int_max =
			le16_to_cpu(resp->num_cmpl_dma_aggr_during_int_max);
		coal_cap->cmpl_aggr_dma_tmr_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_max);
		coal_cap->cmpl_aggr_dma_tmr_during_int_max =
			le16_to_cpu(resp->cmpl_aggr_dma_tmr_during_int_max);
		coal_cap->int_lat_tmr_min_max =
			le16_to_cpu(resp->int_lat_tmr_min_max);
		coal_cap->int_lat_tmr_max_max =
			le16_to_cpu(resp->int_lat_tmr_max_max);
		coal_cap->num_cmpl_aggr_int_max =
			le16_to_cpu(resp->num_cmpl_aggr_int_max);
		coal_cap->timer_units = le16_to_cpu(resp->timer_units);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
}

static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;

	return usec * 1000 / coal_cap->timer_units;
}

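/* Translate the generic coalescing parameters in hw_coal into a
 * RING_CMPL_RING_CFG_AGGINT_PARAMS request, clamping each value to the
 * limits reported by bnxt_hwrm_coal_params_qcaps().
 */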
static void bnxt_hwrm_set_coal_params(struct bnxt *bp,
				      struct bnxt_coal *hw_coal,
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req)
{
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 cmpl_params = coal_cap->cmpl_params;
	u16 val, tmr, max, flags = 0;

	max = hw_coal->bufs_per_record * 128;
	if (hw_coal->budget)
		max = hw_coal->bufs_per_record * hw_coal->budget;
	max = min_t(u16, max, coal_cap->num_cmpl_aggr_int_max);

	val = clamp_t(u16, hw_coal->coal_bufs, 1, max);
	req->num_cmpl_aggr_int = cpu_to_le16(val);

	val = min_t(u16, val, coal_cap->num_cmpl_dma_aggr_max);
	req->num_cmpl_dma_aggr = cpu_to_le16(val);

	val = clamp_t(u16, hw_coal->coal_bufs_irq, 1,
		      coal_cap->num_cmpl_dma_aggr_during_int_max);
	req->num_cmpl_dma_aggr_during_int = cpu_to_le16(val);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks);
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_max_max);
	req->int_lat_tmr_max = cpu_to_le16(tmr);

	/* min timer set to 1/2 of interrupt timer */
	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_INT_LAT_TMR_MIN) {
		val = tmr / 2;
		val = clamp_t(u16, val, 1, coal_cap->int_lat_tmr_min_max);
		req->int_lat_tmr_min = cpu_to_le16(val);
		req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	}

	/* buf timer set to 1/4 of interrupt timer */
	val = clamp_t(u16, tmr / 4, 1, coal_cap->cmpl_aggr_dma_tmr_max);
	req->cmpl_aggr_dma_tmr = cpu_to_le16(val);

	if (cmpl_params &
	    RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_NUM_CMPL_DMA_AGGR_DURING_INT) {
		tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks_irq);
		val = clamp_t(u16, tmr, 1,
			      coal_cap->cmpl_aggr_dma_tmr_during_int_max);
		req->cmpl_aggr_dma_tmr_during_int = cpu_to_le16(val);
		req->enables |=
			cpu_to_le16(BNXT_COAL_CMPL_AGGR_TMR_DURING_INT_ENABLE);
	}

	if (cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET;
	if ((cmpl_params & RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_RING_IDLE) &&
	    hw_coal->idle_thresh && hw_coal->coal_ticks < hw_coal->idle_thresh)
		flags |= RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_RING_IDLE;
	req->flags = cpu_to_le16(flags);
	req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES);
}

/* Caller holds bp->hwrm_cmd_lock */
static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi,
				   struct bnxt_coal *hw_coal)
{
	struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0};
	struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
	struct bnxt_coal_cap *coal_cap = &bp->coal_cap;
	u32 nq_params = coal_cap->nq_params;
	u16 tmr;

	if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN))
		return 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS,
			       -1, -1);
	req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id);
	req.flags =
		cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ);

	tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2;
	tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max);
	req.int_lat_tmr_min = cpu_to_le16(tmr);
	req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE);
	return _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}

Andy Gospodarek6a8788f2018-01-09 16:06:20 -05005667int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi)
5668{
5669 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0};
5670 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5671 struct bnxt_coal coal;
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05005672
5673 /* Tick values in micro seconds.
5674 * 1 coal_buf x bufs_per_record = 1 completion record.
5675 */
5676 memcpy(&coal, &bp->rx_coal, sizeof(struct bnxt_coal));
5677
5678 coal.coal_ticks = cpr->rx_ring_coal.coal_ticks;
5679 coal.coal_bufs = cpr->rx_ring_coal.coal_bufs;
5680
5681 if (!bnapi->rx_ring)
5682 return -ENODEV;
5683
5684 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5685 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5686
Michael Chan74706af2018-10-14 07:02:40 -04005687 bnxt_hwrm_set_coal_params(bp, &coal, &req_rx);
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05005688
Michael Chan2c61d212018-10-14 07:02:50 -04005689 req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring));
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05005690
5691 return hwrm_send_message(bp, &req_rx, sizeof(req_rx),
5692 HWRM_CMD_TIMEOUT);
5693}
5694
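/* Program interrupt coalescing on all completion rings.  RX and TX rings
 * get their respective parameter templates.  On P5 chips, a NAPI with both
 * an RX and a TX completion ring programs each ring separately, and the
 * parent NQ is then programmed with the RX (or TX) parameters.
 */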
Michael Chanc0c050c2015-10-22 16:01:17 -04005695int bnxt_hwrm_set_coal(struct bnxt *bp)
5696{
5697 int i, rc = 0;
Michael Chandfc9c942016-02-26 04:00:03 -05005698 struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0},
5699 req_tx = {0}, *req;
Michael Chanc0c050c2015-10-22 16:01:17 -04005700
Michael Chandfc9c942016-02-26 04:00:03 -05005701 bnxt_hwrm_cmd_hdr_init(bp, &req_rx,
5702 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
5703 bnxt_hwrm_cmd_hdr_init(bp, &req_tx,
5704 HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1);
Michael Chanc0c050c2015-10-22 16:01:17 -04005705
Michael Chan74706af2018-10-14 07:02:40 -04005706 bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx);
5707 bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx);
Michael Chanc0c050c2015-10-22 16:01:17 -04005708
5709 mutex_lock(&bp->hwrm_cmd_lock);
5710 for (i = 0; i < bp->cp_nr_rings; i++) {
Michael Chandfc9c942016-02-26 04:00:03 -05005711 struct bnxt_napi *bnapi = bp->bnapi[i];
Michael Chan58590c82018-10-14 07:02:56 -04005712 struct bnxt_coal *hw_coal;
Michael Chan2c61d212018-10-14 07:02:50 -04005713 u16 ring_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04005714
Michael Chandfc9c942016-02-26 04:00:03 -05005715 req = &req_rx;
Michael Chan2c61d212018-10-14 07:02:50 -04005716 if (!bnapi->rx_ring) {
5717 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
Michael Chandfc9c942016-02-26 04:00:03 -05005718 req = &req_tx;
Michael Chan2c61d212018-10-14 07:02:50 -04005719 } else {
5720 ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring);
5721 }
5722 req->ring_id = cpu_to_le16(ring_id);
Michael Chandfc9c942016-02-26 04:00:03 -05005723
5724 rc = _hwrm_send_message(bp, req, sizeof(*req),
Michael Chanc0c050c2015-10-22 16:01:17 -04005725 HWRM_CMD_TIMEOUT);
5726 if (rc)
5727 break;
Michael Chan58590c82018-10-14 07:02:56 -04005728
5729 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
5730 continue;
5731
5732 if (bnapi->rx_ring && bnapi->tx_ring) {
5733 req = &req_tx;
5734 ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring);
5735 req->ring_id = cpu_to_le16(ring_id);
5736 rc = _hwrm_send_message(bp, req, sizeof(*req),
5737 HWRM_CMD_TIMEOUT);
5738 if (rc)
5739 break;
5740 }
5741 if (bnapi->rx_ring)
5742 hw_coal = &bp->rx_coal;
5743 else
5744 hw_coal = &bp->tx_coal;
5745 __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal);
Michael Chanc0c050c2015-10-22 16:01:17 -04005746 }
5747 mutex_unlock(&bp->hwrm_cmd_lock);
5748 return rc;
5749}
5750
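/* Release all firmware statistics contexts previously allocated for the
 * completion rings.
 */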
5751static int bnxt_hwrm_stat_ctx_free(struct bnxt *bp)
5752{
5753 int rc = 0, i;
5754 struct hwrm_stat_ctx_free_input req = {0};
5755
5756 if (!bp->bnapi)
5757 return 0;
5758
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04005759 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5760 return 0;
5761
Michael Chanc0c050c2015-10-22 16:01:17 -04005762 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1);
5763
5764 mutex_lock(&bp->hwrm_cmd_lock);
5765 for (i = 0; i < bp->cp_nr_rings; i++) {
5766 struct bnxt_napi *bnapi = bp->bnapi[i];
5767 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5768
5769 if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) {
5770 req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id);
5771
5772 rc = _hwrm_send_message(bp, &req, sizeof(req),
5773 HWRM_CMD_TIMEOUT);
5774 if (rc)
5775 break;
5776
5777 cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID;
5778 }
5779 }
5780 mutex_unlock(&bp->hwrm_cmd_lock);
5781 return rc;
5782}
5783
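/* Allocate one firmware statistics context per completion ring, pointing
 * each context at the ring's DMA-mapped statistics buffer.
 */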
5784static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp)
5785{
5786 int rc = 0, i;
5787 struct hwrm_stat_ctx_alloc_input req = {0};
5788 struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr;
5789
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04005790 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
5791 return 0;
5792
Michael Chanc0c050c2015-10-22 16:01:17 -04005793 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1);
5794
Michael Chan51f30782016-07-01 18:46:29 -04005795 req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000);
Michael Chanc0c050c2015-10-22 16:01:17 -04005796
5797 mutex_lock(&bp->hwrm_cmd_lock);
5798 for (i = 0; i < bp->cp_nr_rings; i++) {
5799 struct bnxt_napi *bnapi = bp->bnapi[i];
5800 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
5801
5802 req.stats_dma_addr = cpu_to_le64(cpr->hw_stats_map);
5803
5804 rc = _hwrm_send_message(bp, &req, sizeof(req),
5805 HWRM_CMD_TIMEOUT);
5806 if (rc)
5807 break;
5808
5809 cpr->hw_stats_ctx_id = le32_to_cpu(resp->stat_ctx_id);
5810
5811 bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id;
5812 }
5813 mutex_unlock(&bp->hwrm_cmd_lock);
Pan Bian89aa8442016-12-03 17:56:17 +08005814 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04005815}
5816
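/* Query the current function configuration from the firmware: VF VLAN,
 * DCBX/LLDP agent capabilities, multi-host mode, NPAR partition type,
 * bridge (EVB) mode and the maximum configured MTU.
 */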
Michael Chancf6645f2016-06-13 02:25:28 -04005817static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
5818{
5819 struct hwrm_func_qcfg_input req = {0};
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04005820 struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chan9315edc2017-07-24 12:34:25 -04005821 u16 flags;
Michael Chancf6645f2016-06-13 02:25:28 -04005822 int rc;
5823
5824 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1);
5825 req.fid = cpu_to_le16(0xffff);
5826 mutex_lock(&bp->hwrm_cmd_lock);
5827 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5828 if (rc)
5829 goto func_qcfg_exit;
5830
5831#ifdef CONFIG_BNXT_SRIOV
5832 if (BNXT_VF(bp)) {
Michael Chancf6645f2016-06-13 02:25:28 -04005833 struct bnxt_vf_info *vf = &bp->vf;
5834
5835 vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
5836 }
5837#endif
Michael Chan9315edc2017-07-24 12:34:25 -04005838 flags = le16_to_cpu(resp->flags);
5839 if (flags & (FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED |
5840 FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED)) {
Michael Chan97381a12018-08-05 16:51:54 -04005841 bp->fw_cap |= BNXT_FW_CAP_LLDP_AGENT;
Michael Chan9315edc2017-07-24 12:34:25 -04005842 if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED)
Michael Chan97381a12018-08-05 16:51:54 -04005843 bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT;
Deepak Khungar9e54e322017-04-21 20:11:26 -04005844 }
Michael Chan9315edc2017-07-24 12:34:25 -04005845 if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST))
5846 bp->flags |= BNXT_FLAG_MULTI_HOST;
Michael Chanbc39f882017-03-08 18:44:34 -05005847
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04005848 switch (resp->port_partition_type) {
5849 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
5850 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:
5851 case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR2_0:
5852 bp->port_partition_type = resp->port_partition_type;
5853 break;
5854 }
Michael Chan32e8239c2017-07-24 12:34:21 -04005855 if (bp->hwrm_spec_code < 0x10707 ||
5856 resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEB)
5857 bp->br_mode = BRIDGE_MODE_VEB;
5858 else if (resp->evb_mode == FUNC_QCFG_RESP_EVB_MODE_VEPA)
5859 bp->br_mode = BRIDGE_MODE_VEPA;
5860 else
5861 bp->br_mode = BRIDGE_MODE_UNDEF;
Michael Chancf6645f2016-06-13 02:25:28 -04005862
Michael Chan7eb9bb32017-10-26 11:51:25 -04005863 bp->max_mtu = le16_to_cpu(resp->max_mtu_configured);
5864 if (!bp->max_mtu)
5865 bp->max_mtu = BNXT_MAX_MTU;
5866
Michael Chancf6645f2016-06-13 02:25:28 -04005867func_qcfg_exit:
5868 mutex_unlock(&bp->hwrm_cmd_lock);
5869 return rc;
5870}
5871
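/* Query how much host memory the firmware needs for each backing store
 * context type (QP, SRQ, CQ, VNIC, stats and TQM rings) on chips that keep
 * these structures in host memory.  The results are cached in bp->ctx.
 */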
Michael Chan98f04cf2018-10-14 07:02:43 -04005872static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
5873{
5874 struct hwrm_func_backing_store_qcaps_input req = {0};
5875 struct hwrm_func_backing_store_qcaps_output *resp =
5876 bp->hwrm_cmd_resp_addr;
5877 int rc;
5878
5879 if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx)
5880 return 0;
5881
5882 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1);
5883 mutex_lock(&bp->hwrm_cmd_lock);
5884 rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
5885 if (!rc) {
5886 struct bnxt_ctx_pg_info *ctx_pg;
5887 struct bnxt_ctx_mem_info *ctx;
5888 int i;
5889
5890 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
5891 if (!ctx) {
5892 rc = -ENOMEM;
5893 goto ctx_err;
5894 }
5895 ctx_pg = kcalloc(bp->max_q + 1, sizeof(*ctx_pg), GFP_KERNEL);
5896 if (!ctx_pg) {
5897 kfree(ctx);
5898 rc = -ENOMEM;
5899 goto ctx_err;
5900 }
5901 for (i = 0; i < bp->max_q + 1; i++, ctx_pg++)
5902 ctx->tqm_mem[i] = ctx_pg;
5903
5904 bp->ctx = ctx;
5905 ctx->qp_max_entries = le32_to_cpu(resp->qp_max_entries);
5906 ctx->qp_min_qp1_entries = le16_to_cpu(resp->qp_min_qp1_entries);
5907 ctx->qp_max_l2_entries = le16_to_cpu(resp->qp_max_l2_entries);
5908 ctx->qp_entry_size = le16_to_cpu(resp->qp_entry_size);
5909 ctx->srq_max_l2_entries = le16_to_cpu(resp->srq_max_l2_entries);
5910 ctx->srq_max_entries = le32_to_cpu(resp->srq_max_entries);
5911 ctx->srq_entry_size = le16_to_cpu(resp->srq_entry_size);
5912 ctx->cq_max_l2_entries = le16_to_cpu(resp->cq_max_l2_entries);
5913 ctx->cq_max_entries = le32_to_cpu(resp->cq_max_entries);
5914 ctx->cq_entry_size = le16_to_cpu(resp->cq_entry_size);
5915 ctx->vnic_max_vnic_entries =
5916 le16_to_cpu(resp->vnic_max_vnic_entries);
5917 ctx->vnic_max_ring_table_entries =
5918 le16_to_cpu(resp->vnic_max_ring_table_entries);
5919 ctx->vnic_entry_size = le16_to_cpu(resp->vnic_entry_size);
5920 ctx->stat_max_entries = le32_to_cpu(resp->stat_max_entries);
5921 ctx->stat_entry_size = le16_to_cpu(resp->stat_entry_size);
5922 ctx->tqm_entry_size = le16_to_cpu(resp->tqm_entry_size);
5923 ctx->tqm_min_entries_per_ring =
5924 le32_to_cpu(resp->tqm_min_entries_per_ring);
5925 ctx->tqm_max_entries_per_ring =
5926 le32_to_cpu(resp->tqm_max_entries_per_ring);
5927 ctx->tqm_entries_multiple = resp->tqm_entries_multiple;
5928 if (!ctx->tqm_entries_multiple)
5929 ctx->tqm_entries_multiple = 1;
5930 ctx->mrav_max_entries = le32_to_cpu(resp->mrav_max_entries);
5931 ctx->mrav_entry_size = le16_to_cpu(resp->mrav_entry_size);
5932 ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
5933 ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
5934 } else {
5935 rc = 0;
5936 }
5937ctx_err:
5938 mutex_unlock(&bp->hwrm_cmd_lock);
5939 return rc;
5940}
5941
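/* Encode the page size and depth of a backing store area for the firmware:
 * the upper nibble of *pg_attr selects 4K/8K/64K pages, and bit 0 selects
 * one level of indirection through a page table when multiple pages are
 * used.
 */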
Michael Chan1b9394e2018-10-14 07:02:44 -04005942static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
5943 __le64 *pg_dir)
5944{
5945 u8 pg_size = 0;
5946
5947 if (BNXT_PAGE_SHIFT == 13)
5948 pg_size = 1 << 4;
5949 else if (BNXT_PAGE_SHIFT == 16)
5950 pg_size = 2 << 4;
5951
5952 *pg_attr = pg_size;
5953 if (rmem->nr_pages > 1) {
5954 *pg_attr |= 1;
5955 *pg_dir = cpu_to_le64(rmem->pg_tbl_map);
5956 } else {
5957 *pg_dir = cpu_to_le64(rmem->dma_arr[0]);
5958 }
5959}
5960
5961#define FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES \
5962 (FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP | \
5963 FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ | \
5964 FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ | \
5965 FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC | \
5966 FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT)
5967
5968static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables)
5969{
5970 struct hwrm_func_backing_store_cfg_input req = {0};
5971 struct bnxt_ctx_mem_info *ctx = bp->ctx;
5972 struct bnxt_ctx_pg_info *ctx_pg;
5973 __le32 *num_entries;
5974 __le64 *pg_dir;
5975 u8 *pg_attr;
5976 int i, rc;
5977 u32 ena;
5978
5979 if (!ctx)
5980 return 0;
5981
5982 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1);
5983 req.enables = cpu_to_le32(enables);
5984
5985 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) {
5986 ctx_pg = &ctx->qp_mem;
5987 req.qp_num_entries = cpu_to_le32(ctx_pg->entries);
5988 req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries);
5989 req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries);
5990 req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size);
5991 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
5992 &req.qpc_pg_size_qpc_lvl,
5993 &req.qpc_page_dir);
5994 }
5995 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) {
5996 ctx_pg = &ctx->srq_mem;
5997 req.srq_num_entries = cpu_to_le32(ctx_pg->entries);
5998 req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries);
5999 req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size);
6000 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6001 &req.srq_pg_size_srq_lvl,
6002 &req.srq_page_dir);
6003 }
6004 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) {
6005 ctx_pg = &ctx->cq_mem;
6006 req.cq_num_entries = cpu_to_le32(ctx_pg->entries);
6007 req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries);
6008 req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size);
6009 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl,
6010 &req.cq_page_dir);
6011 }
6012 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) {
6013 ctx_pg = &ctx->vnic_mem;
6014 req.vnic_num_vnic_entries =
6015 cpu_to_le16(ctx->vnic_max_vnic_entries);
6016 req.vnic_num_ring_table_entries =
6017 cpu_to_le16(ctx->vnic_max_ring_table_entries);
6018 req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size);
6019 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6020 &req.vnic_pg_size_vnic_lvl,
6021 &req.vnic_page_dir);
6022 }
6023 if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) {
6024 ctx_pg = &ctx->stat_mem;
6025 req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries);
6026 req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size);
6027 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem,
6028 &req.stat_pg_size_stat_lvl,
6029 &req.stat_page_dir);
6030 }
6031 for (i = 0, num_entries = &req.tqm_sp_num_entries,
6032 pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl,
6033 pg_dir = &req.tqm_sp_page_dir,
6034 ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP;
6035 i < 9; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) {
6036 if (!(enables & ena))
6037 continue;
6038
6039 req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size);
6040 ctx_pg = ctx->tqm_mem[i];
6041 *num_entries = cpu_to_le32(ctx_pg->entries);
6042 bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir);
6043 }
6044 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6045 if (rc)
6046 rc = -EIO;
6047 return rc;
6048}
6049
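/* Allocate the DMA pages for one backing store area.  Fails if the area
 * would need more than MAX_CTX_PAGES pages at the native page size.
 */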
Michael Chan98f04cf2018-10-14 07:02:43 -04006050static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
6051 struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size)
6052{
6053 struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
6054
6055 if (!mem_size)
6056 return 0;
6057
6058 rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
6059 if (rmem->nr_pages > MAX_CTX_PAGES) {
6060 rmem->nr_pages = 0;
6061 return -EINVAL;
6062 }
6063 rmem->page_size = BNXT_PAGE_SIZE;
6064 rmem->pg_arr = ctx_pg->ctx_pg_arr;
6065 rmem->dma_arr = ctx_pg->ctx_dma_arr;
Michael Chan1b9394e2018-10-14 07:02:44 -04006066 rmem->flags = BNXT_RMEM_VALID_PTE_FLAG;
Michael Chan98f04cf2018-10-14 07:02:43 -04006067 return bnxt_alloc_ring(bp, rmem);
6068}
6069
6070static void bnxt_free_ctx_mem(struct bnxt *bp)
6071{
6072 struct bnxt_ctx_mem_info *ctx = bp->ctx;
6073 int i;
6074
6075 if (!ctx)
6076 return;
6077
6078 if (ctx->tqm_mem[0]) {
6079 for (i = 0; i < bp->max_q + 1; i++)
6080 bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem);
6081 kfree(ctx->tqm_mem[0]);
6082 ctx->tqm_mem[0] = NULL;
6083 }
6084
6085 bnxt_free_ring(bp, &ctx->stat_mem.ring_mem);
6086 bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem);
6087 bnxt_free_ring(bp, &ctx->cq_mem.ring_mem);
6088 bnxt_free_ring(bp, &ctx->srq_mem.ring_mem);
6089 bnxt_free_ring(bp, &ctx->qp_mem.ring_mem);
6090 ctx->flags &= ~BNXT_CTX_FLAG_INITED;
6091}
6092
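/* Query the backing store requirements and, on the first call, allocate
 * the QP, SRQ, CQ, VNIC, stats and TQM context memory and hand its page
 * tables to the firmware.
 */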
6093static int bnxt_alloc_ctx_mem(struct bnxt *bp)
6094{
6095 struct bnxt_ctx_pg_info *ctx_pg;
6096 struct bnxt_ctx_mem_info *ctx;
Michael Chan1b9394e2018-10-14 07:02:44 -04006097 u32 mem_size, ena, entries;
Michael Chan98f04cf2018-10-14 07:02:43 -04006098 int i, rc;
6099
6100 rc = bnxt_hwrm_func_backing_store_qcaps(bp);
6101 if (rc) {
6102 netdev_err(bp->dev, "Failed querying context mem capability, rc = %d.\n",
6103 rc);
6104 return rc;
6105 }
6106 ctx = bp->ctx;
6107 if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED))
6108 return 0;
6109
6110 ctx_pg = &ctx->qp_mem;
6111 ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries;
6112 mem_size = ctx->qp_entry_size * ctx_pg->entries;
6113 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6114 if (rc)
6115 return rc;
6116
6117 ctx_pg = &ctx->srq_mem;
6118 ctx_pg->entries = ctx->srq_max_l2_entries;
6119 mem_size = ctx->srq_entry_size * ctx_pg->entries;
6120 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6121 if (rc)
6122 return rc;
6123
6124 ctx_pg = &ctx->cq_mem;
6125 ctx_pg->entries = ctx->cq_max_l2_entries;
6126 mem_size = ctx->cq_entry_size * ctx_pg->entries;
6127 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6128 if (rc)
6129 return rc;
6130
6131 ctx_pg = &ctx->vnic_mem;
6132 ctx_pg->entries = ctx->vnic_max_vnic_entries +
6133 ctx->vnic_max_ring_table_entries;
6134 mem_size = ctx->vnic_entry_size * ctx_pg->entries;
6135 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6136 if (rc)
6137 return rc;
6138
6139 ctx_pg = &ctx->stat_mem;
6140 ctx_pg->entries = ctx->stat_max_entries;
6141 mem_size = ctx->stat_entry_size * ctx_pg->entries;
6142 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6143 if (rc)
6144 return rc;
6145
6146 entries = ctx->qp_max_l2_entries;
6147 entries = roundup(entries, ctx->tqm_entries_multiple);
6148 entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring,
6149 ctx->tqm_max_entries_per_ring);
Michael Chan1b9394e2018-10-14 07:02:44 -04006150 for (i = 0, ena = 0; i < bp->max_q + 1; i++) {
Michael Chan98f04cf2018-10-14 07:02:43 -04006151 ctx_pg = ctx->tqm_mem[i];
6152 ctx_pg->entries = entries;
6153 mem_size = ctx->tqm_entry_size * entries;
6154 rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size);
6155 if (rc)
6156 return rc;
Michael Chan1b9394e2018-10-14 07:02:44 -04006157 ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
Michael Chan98f04cf2018-10-14 07:02:43 -04006158 }
Michael Chan1b9394e2018-10-14 07:02:44 -04006159 ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
6160 rc = bnxt_hwrm_func_backing_store_cfg(bp, ena);
6161 if (rc)
6162 netdev_err(bp->dev, "Failed configuring context mem, rc = %d.\n",
6163 rc);
6164 else
6165 ctx->flags |= BNXT_CTX_FLAG_INITED;
6166
Michael Chan98f04cf2018-10-14 07:02:43 -04006167 return 0;
6168}
6169
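/* Query the min/max resource counts (rings, stat contexts, vnics, MSI-X)
 * used for resource reservation.  With @all set to false, only the TX
 * scheduler input count is retrieved.
 */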
Michael Chandb4723b2018-03-31 13:54:13 -04006170int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all)
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006171{
6172 struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
6173 struct hwrm_func_resource_qcaps_input req = {0};
6174 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6175 int rc;
6176
6177 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1);
6178 req.fid = cpu_to_le16(0xffff);
6179
6180 mutex_lock(&bp->hwrm_cmd_lock);
6181 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6182 if (rc) {
6183 rc = -EIO;
6184 goto hwrm_func_resc_qcaps_exit;
6185 }
6186
Michael Chandb4723b2018-03-31 13:54:13 -04006187 hw_resc->max_tx_sch_inputs = le16_to_cpu(resp->max_tx_scheduler_inputs);
6188 if (!all)
6189 goto hwrm_func_resc_qcaps_exit;
6190
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006191 hw_resc->min_rsscos_ctxs = le16_to_cpu(resp->min_rsscos_ctx);
6192 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6193 hw_resc->min_cp_rings = le16_to_cpu(resp->min_cmpl_rings);
6194 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6195 hw_resc->min_tx_rings = le16_to_cpu(resp->min_tx_rings);
6196 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6197 hw_resc->min_rx_rings = le16_to_cpu(resp->min_rx_rings);
6198 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6199 hw_resc->min_hw_ring_grps = le16_to_cpu(resp->min_hw_ring_grps);
6200 hw_resc->max_hw_ring_grps = le16_to_cpu(resp->max_hw_ring_grps);
6201 hw_resc->min_l2_ctxs = le16_to_cpu(resp->min_l2_ctxs);
6202 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6203 hw_resc->min_vnics = le16_to_cpu(resp->min_vnics);
6204 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6205 hw_resc->min_stat_ctxs = le16_to_cpu(resp->min_stat_ctx);
6206 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6207
Michael Chan9c1fabd2018-10-14 07:02:47 -04006208 if (bp->flags & BNXT_FLAG_CHIP_P5) {
6209 u16 max_msix = le16_to_cpu(resp->max_msix);
6210
6211 hw_resc->max_irqs = min_t(u16, hw_resc->max_irqs, max_msix);
6212 hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings;
6213 }
6214
Michael Chan4673d662018-01-17 03:21:11 -05006215 if (BNXT_PF(bp)) {
6216 struct bnxt_pf_info *pf = &bp->pf;
6217
6218 pf->vf_resv_strategy =
6219 le16_to_cpu(resp->vf_reservation_strategy);
Michael Chanbf827362018-08-05 16:51:50 -04006220 if (pf->vf_resv_strategy > BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC)
Michael Chan4673d662018-01-17 03:21:11 -05006221 pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL;
6222 }
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006223hwrm_func_resc_qcaps_exit:
6224 mutex_unlock(&bp->hwrm_cmd_lock);
6225 return rc;
6226}
6227
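/* Query the basic function capabilities and absolute resource maximums,
 * and record the MAC address, RoCE support, TX push mode and WoL
 * capability reported by the firmware.
 */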
6228static int __bnxt_hwrm_func_qcaps(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04006229{
6230 int rc = 0;
6231 struct hwrm_func_qcaps_input req = {0};
6232 struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chan6a4f2942018-01-17 03:21:06 -05006233 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
6234 u32 flags;
Michael Chanc0c050c2015-10-22 16:01:17 -04006235
6236 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1);
6237 req.fid = cpu_to_le16(0xffff);
6238
6239 mutex_lock(&bp->hwrm_cmd_lock);
6240 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6241 if (rc)
6242 goto hwrm_func_qcaps_exit;
6243
Michael Chan6a4f2942018-01-17 03:21:06 -05006244 flags = le32_to_cpu(resp->flags);
6245 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V1_SUPPORTED)
Michael Chane4060d32016-12-07 00:26:19 -05006246 bp->flags |= BNXT_FLAG_ROCEV1_CAP;
Michael Chan6a4f2942018-01-17 03:21:06 -05006247 if (flags & FUNC_QCAPS_RESP_FLAGS_ROCE_V2_SUPPORTED)
Michael Chane4060d32016-12-07 00:26:19 -05006248 bp->flags |= BNXT_FLAG_ROCEV2_CAP;
6249
Michael Chan7cc5a202016-09-19 03:58:05 -04006250 bp->tx_push_thresh = 0;
Michael Chan6a4f2942018-01-17 03:21:06 -05006251 if (flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED)
Michael Chan7cc5a202016-09-19 03:58:05 -04006252 bp->tx_push_thresh = BNXT_TX_PUSH_THRESH;
6253
Michael Chan6a4f2942018-01-17 03:21:06 -05006254 hw_resc->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
6255 hw_resc->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);
6256 hw_resc->max_tx_rings = le16_to_cpu(resp->max_tx_rings);
6257 hw_resc->max_rx_rings = le16_to_cpu(resp->max_rx_rings);
6258 hw_resc->max_hw_ring_grps = le32_to_cpu(resp->max_hw_ring_grps);
6259 if (!hw_resc->max_hw_ring_grps)
6260 hw_resc->max_hw_ring_grps = hw_resc->max_tx_rings;
6261 hw_resc->max_l2_ctxs = le16_to_cpu(resp->max_l2_ctxs);
6262 hw_resc->max_vnics = le16_to_cpu(resp->max_vnics);
6263 hw_resc->max_stat_ctxs = le16_to_cpu(resp->max_stat_ctx);
6264
Michael Chanc0c050c2015-10-22 16:01:17 -04006265 if (BNXT_PF(bp)) {
6266 struct bnxt_pf_info *pf = &bp->pf;
6267
6268 pf->fw_fid = le16_to_cpu(resp->fid);
6269 pf->port_id = le16_to_cpu(resp->port_id);
Michael Chan87027db2016-07-01 18:46:28 -04006270 bp->dev->dev_port = pf->port_id;
Michael Chan11f15ed2016-04-05 14:08:55 -04006271 memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
Michael Chanc0c050c2015-10-22 16:01:17 -04006272 pf->first_vf_id = le16_to_cpu(resp->first_vf_id);
6273 pf->max_vfs = le16_to_cpu(resp->max_vfs);
6274 pf->max_encap_records = le32_to_cpu(resp->max_encap_records);
6275 pf->max_decap_records = le32_to_cpu(resp->max_decap_records);
6276 pf->max_tx_em_flows = le32_to_cpu(resp->max_tx_em_flows);
6277 pf->max_tx_wm_flows = le32_to_cpu(resp->max_tx_wm_flows);
6278 pf->max_rx_em_flows = le32_to_cpu(resp->max_rx_em_flows);
6279 pf->max_rx_wm_flows = le32_to_cpu(resp->max_rx_wm_flows);
Michael Chan6a4f2942018-01-17 03:21:06 -05006280 if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED)
Michael Chanc1ef1462017-04-04 18:14:07 -04006281 bp->flags |= BNXT_FLAG_WOL_CAP;
Michael Chanc0c050c2015-10-22 16:01:17 -04006282 } else {
Michael Chan379a80a2015-10-23 15:06:19 -04006283#ifdef CONFIG_BNXT_SRIOV
Michael Chanc0c050c2015-10-22 16:01:17 -04006284 struct bnxt_vf_info *vf = &bp->vf;
6285
6286 vf->fw_fid = le16_to_cpu(resp->fid);
Michael Chan7cc5a202016-09-19 03:58:05 -04006287 memcpy(vf->mac_addr, resp->mac_address, ETH_ALEN);
Michael Chan379a80a2015-10-23 15:06:19 -04006288#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04006289 }
6290
Michael Chanc0c050c2015-10-22 16:01:17 -04006291hwrm_func_qcaps_exit:
6292 mutex_unlock(&bp->hwrm_cmd_lock);
6293 return rc;
6294}
6295
Michael Chan804fba42018-12-09 07:00:59 -05006296static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp);
6297
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006298static int bnxt_hwrm_func_qcaps(struct bnxt *bp)
6299{
6300 int rc;
6301
6302 rc = __bnxt_hwrm_func_qcaps(bp);
6303 if (rc)
6304 return rc;
Michael Chan804fba42018-12-09 07:00:59 -05006305 rc = bnxt_hwrm_queue_qportcfg(bp);
6306 if (rc) {
6307 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %d\n", rc);
6308 return rc;
6309 }
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006310 if (bp->hwrm_spec_code >= 0x10803) {
Michael Chan98f04cf2018-10-14 07:02:43 -04006311 rc = bnxt_alloc_ctx_mem(bp);
6312 if (rc)
6313 return rc;
Michael Chandb4723b2018-03-31 13:54:13 -04006314 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006315 if (!rc)
Michael Chan97381a12018-08-05 16:51:54 -04006316 bp->fw_cap |= BNXT_FW_CAP_NEW_RM;
Michael Chanbe0dd9c2018-01-17 03:21:07 -05006317 }
6318 return 0;
6319}
6320
Michael Chanc0c050c2015-10-22 16:01:17 -04006321static int bnxt_hwrm_func_reset(struct bnxt *bp)
6322{
6323 struct hwrm_func_reset_input req = {0};
6324
6325 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1);
6326 req.enables = 0;
6327
6328 return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT);
6329}
6330
6331static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp)
6332{
6333 int rc = 0;
6334 struct hwrm_queue_qportcfg_input req = {0};
6335 struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chanaabfc012018-08-05 16:51:58 -04006336 u8 i, j, *qptr;
6337 bool no_rdma;
Michael Chanc0c050c2015-10-22 16:01:17 -04006338
6339 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1);
6340
6341 mutex_lock(&bp->hwrm_cmd_lock);
6342 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6343 if (rc)
6344 goto qportcfg_exit;
6345
6346 if (!resp->max_configurable_queues) {
6347 rc = -EINVAL;
6348 goto qportcfg_exit;
6349 }
6350 bp->max_tc = resp->max_configurable_queues;
Michael Chan87c374d2016-12-02 21:17:16 -05006351 bp->max_lltc = resp->max_configurable_lossless_queues;
Michael Chanc0c050c2015-10-22 16:01:17 -04006352 if (bp->max_tc > BNXT_MAX_QUEUE)
6353 bp->max_tc = BNXT_MAX_QUEUE;
6354
Michael Chanaabfc012018-08-05 16:51:58 -04006355 no_rdma = !(bp->flags & BNXT_FLAG_ROCE_CAP);
6356 qptr = &resp->queue_id0;
6357 for (i = 0, j = 0; i < bp->max_tc; i++) {
Michael Chan98f04cf2018-10-14 07:02:43 -04006358 bp->q_info[j].queue_id = *qptr;
6359 bp->q_ids[i] = *qptr++;
Michael Chanaabfc012018-08-05 16:51:58 -04006360 bp->q_info[j].queue_profile = *qptr++;
6361 bp->tc_to_qidx[j] = j;
6362 if (!BNXT_CNPQ(bp->q_info[j].queue_profile) ||
6363 (no_rdma && BNXT_PF(bp)))
6364 j++;
6365 }
Michael Chan98f04cf2018-10-14 07:02:43 -04006366 bp->max_q = bp->max_tc;
Michael Chanaabfc012018-08-05 16:51:58 -04006367 bp->max_tc = max_t(u8, j, 1);
6368
Michael Chan441cabb2016-09-19 03:58:02 -04006369 if (resp->queue_cfg_info & QUEUE_QPORTCFG_RESP_QUEUE_CFG_INFO_ASYM_CFG)
6370 bp->max_tc = 1;
6371
Michael Chan87c374d2016-12-02 21:17:16 -05006372 if (bp->max_lltc > bp->max_tc)
6373 bp->max_lltc = bp->max_tc;
6374
Michael Chanc0c050c2015-10-22 16:01:17 -04006375qportcfg_exit:
6376 mutex_unlock(&bp->hwrm_cmd_lock);
6377 return rc;
6378}
6379
6380static int bnxt_hwrm_ver_get(struct bnxt *bp)
6381{
6382 int rc;
6383 struct hwrm_ver_get_input req = {0};
6384 struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr;
Deepak Khungare605db82017-05-29 19:06:04 -04006385 u32 dev_caps_cfg;
Michael Chanc0c050c2015-10-22 16:01:17 -04006386
Michael Chane6ef2692016-03-28 19:46:05 -04006387 bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN;
Michael Chanc0c050c2015-10-22 16:01:17 -04006388 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1);
6389 req.hwrm_intf_maj = HWRM_VERSION_MAJOR;
6390 req.hwrm_intf_min = HWRM_VERSION_MINOR;
6391 req.hwrm_intf_upd = HWRM_VERSION_UPDATE;
6392 mutex_lock(&bp->hwrm_cmd_lock);
6393 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6394 if (rc)
6395 goto hwrm_ver_get_exit;
6396
6397 memcpy(&bp->ver_resp, resp, sizeof(struct hwrm_ver_get_output));
6398
Michael Chan894aa692018-01-17 03:21:03 -05006399 bp->hwrm_spec_code = resp->hwrm_intf_maj_8b << 16 |
6400 resp->hwrm_intf_min_8b << 8 |
6401 resp->hwrm_intf_upd_8b;
6402 if (resp->hwrm_intf_maj_8b < 1) {
Michael Chanc1935542015-12-27 18:19:28 -05006403 netdev_warn(bp->dev, "HWRM interface %d.%d.%d is older than 1.0.0.\n",
Michael Chan894aa692018-01-17 03:21:03 -05006404 resp->hwrm_intf_maj_8b, resp->hwrm_intf_min_8b,
6405 resp->hwrm_intf_upd_8b);
Michael Chanc1935542015-12-27 18:19:28 -05006406 netdev_warn(bp->dev, "Please update firmware with HWRM interface 1.0.0 or newer.\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04006407 }
Michael Chan431aa1e2017-10-26 11:51:23 -04006408 snprintf(bp->fw_ver_str, BC_HWRM_STR_LEN, "%d.%d.%d.%d",
Michael Chan894aa692018-01-17 03:21:03 -05006409 resp->hwrm_fw_maj_8b, resp->hwrm_fw_min_8b,
6410 resp->hwrm_fw_bld_8b, resp->hwrm_fw_rsvd_8b);
Michael Chanc0c050c2015-10-22 16:01:17 -04006411
Michael Chanff4fe812016-02-26 04:00:04 -05006412 bp->hwrm_cmd_timeout = le16_to_cpu(resp->def_req_timeout);
6413 if (!bp->hwrm_cmd_timeout)
6414 bp->hwrm_cmd_timeout = DFLT_HWRM_CMD_TIMEOUT;
6415
Michael Chan1dfddc42018-10-14 07:02:39 -04006416 if (resp->hwrm_intf_maj_8b >= 1) {
Michael Chane6ef2692016-03-28 19:46:05 -04006417 bp->hwrm_max_req_len = le16_to_cpu(resp->max_req_win_len);
Michael Chan1dfddc42018-10-14 07:02:39 -04006418 bp->hwrm_max_ext_req_len = le16_to_cpu(resp->max_ext_req_len);
6419 }
6420 if (bp->hwrm_max_ext_req_len < HWRM_MAX_REQ_LEN)
6421 bp->hwrm_max_ext_req_len = HWRM_MAX_REQ_LEN;
Michael Chane6ef2692016-03-28 19:46:05 -04006422
Michael Chan659c8052016-06-13 02:25:33 -04006423 bp->chip_num = le16_to_cpu(resp->chip_num);
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -04006424 if (bp->chip_num == CHIP_NUM_58700 && !resp->chip_rev &&
6425 !resp->chip_metal)
6426 bp->flags |= BNXT_FLAG_CHIP_NITRO_A0;
Michael Chan659c8052016-06-13 02:25:33 -04006427
Deepak Khungare605db82017-05-29 19:06:04 -04006428 dev_caps_cfg = le32_to_cpu(resp->dev_caps_cfg);
6429 if ((dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED) &&
6430 (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED))
Michael Chan97381a12018-08-05 16:51:54 -04006431 bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD;
Deepak Khungare605db82017-05-29 19:06:04 -04006432
Michael Chanc0c050c2015-10-22 16:01:17 -04006433hwrm_ver_get_exit:
6434 mutex_unlock(&bp->hwrm_cmd_lock);
6435 return rc;
6436}
6437
Rob Swindell5ac67d82016-09-19 03:58:03 -04006438int bnxt_hwrm_fw_set_time(struct bnxt *bp)
6439{
6440 struct hwrm_fw_set_time_input req = {0};
Arnd Bergmann7dfaa7b2017-11-06 15:04:39 +01006441 struct tm tm;
6442 time64_t now = ktime_get_real_seconds();
Rob Swindell5ac67d82016-09-19 03:58:03 -04006443
Michael Chanca2c39e2018-04-26 17:44:34 -04006444 if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) ||
6445 bp->hwrm_spec_code < 0x10400)
Rob Swindell5ac67d82016-09-19 03:58:03 -04006446 return -EOPNOTSUPP;
6447
Arnd Bergmann7dfaa7b2017-11-06 15:04:39 +01006448 time64_to_tm(now, 0, &tm);
Rob Swindell5ac67d82016-09-19 03:58:03 -04006449 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1);
6450 req.year = cpu_to_le16(1900 + tm.tm_year);
6451 req.month = 1 + tm.tm_mon;
6452 req.day = tm.tm_mday;
6453 req.hour = tm.tm_hour;
6454 req.minute = tm.tm_min;
6455 req.second = tm.tm_sec;
6456 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6457}
6458
Michael Chan3bdf56c2016-03-07 15:38:45 -05006459static int bnxt_hwrm_port_qstats(struct bnxt *bp)
6460{
6461 int rc;
6462 struct bnxt_pf_info *pf = &bp->pf;
6463 struct hwrm_port_qstats_input req = {0};
6464
6465 if (!(bp->flags & BNXT_FLAG_PORT_STATS))
6466 return 0;
6467
6468 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1);
6469 req.port_id = cpu_to_le16(pf->port_id);
6470 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_map);
6471 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_map);
6472 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6473 return rc;
6474}
6475
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04006476static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp)
6477{
Michael Chan36e53342018-10-14 07:02:38 -04006478 struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr;
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04006479 struct hwrm_port_qstats_ext_input req = {0};
6480 struct bnxt_pf_info *pf = &bp->pf;
Michael Chan36e53342018-10-14 07:02:38 -04006481 int rc;
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04006482
6483 if (!(bp->flags & BNXT_FLAG_PORT_STATS_EXT))
6484 return 0;
6485
6486 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1);
6487 req.port_id = cpu_to_le16(pf->port_id);
6488 req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext));
6489 req.rx_stat_host_addr = cpu_to_le64(bp->hw_rx_port_stats_ext_map);
Michael Chan36e53342018-10-14 07:02:38 -04006490 req.tx_stat_size = cpu_to_le16(sizeof(struct tx_port_stats_ext));
6491 req.tx_stat_host_addr = cpu_to_le64(bp->hw_tx_port_stats_ext_map);
6492 mutex_lock(&bp->hwrm_cmd_lock);
6493 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6494 if (!rc) {
6495 bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8;
6496 bp->fw_tx_stats_ext_size = le16_to_cpu(resp->tx_stat_size) / 8;
6497 } else {
6498 bp->fw_rx_stats_ext_size = 0;
6499 bp->fw_tx_stats_ext_size = 0;
6500 }
6501 mutex_unlock(&bp->hwrm_cmd_lock);
6502 return rc;
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04006503}
6504
Michael Chanc0c050c2015-10-22 16:01:17 -04006505static void bnxt_hwrm_free_tunnel_ports(struct bnxt *bp)
6506{
6507 if (bp->vxlan_port_cnt) {
6508 bnxt_hwrm_tunnel_dst_port_free(
6509 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
6510 }
6511 bp->vxlan_port_cnt = 0;
6512 if (bp->nge_port_cnt) {
6513 bnxt_hwrm_tunnel_dst_port_free(
6514 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
6515 }
6516 bp->nge_port_cnt = 0;
6517}
6518
6519static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa)
6520{
6521 int rc, i;
6522 u32 tpa_flags = 0;
6523
6524 if (set_tpa)
6525 tpa_flags = bp->flags & BNXT_FLAG_TPA;
6526 for (i = 0; i < bp->nr_vnics; i++) {
6527 rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags);
6528 if (rc) {
6529 netdev_err(bp->dev, "hwrm vnic set tpa failure rc for vnic %d: %x\n",
Sankar Patchineelam23e12c82017-03-28 19:47:30 -04006530 i, rc);
Michael Chanc0c050c2015-10-22 16:01:17 -04006531 return rc;
6532 }
6533 }
6534 return 0;
6535}
6536
6537static void bnxt_hwrm_clear_vnic_rss(struct bnxt *bp)
6538{
6539 int i;
6540
6541 for (i = 0; i < bp->nr_vnics; i++)
6542 bnxt_hwrm_vnic_set_rss(bp, i, false);
6543}
6544
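/* Tear down firmware resources in reverse order of allocation: filters,
 * RSS and vnic contexts, vnics, rings and ring groups, and finally the
 * stats contexts and tunnel ports when interrupts are re-initialized.
 */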
6545static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path,
6546 bool irq_re_init)
6547{
6548 if (bp->vnic_info) {
6549 bnxt_hwrm_clear_vnic_filter(bp);
6550 /* clear all RSS settings before freeing the vnic ctx */
6551 bnxt_hwrm_clear_vnic_rss(bp);
6552 bnxt_hwrm_vnic_ctx_free(bp);
6553 /* before freeing the vnic, undo its TPA settings */
6554 if (bp->flags & BNXT_FLAG_TPA)
6555 bnxt_set_tpa(bp, false);
6556 bnxt_hwrm_vnic_free(bp);
6557 }
6558 bnxt_hwrm_ring_free(bp, close_path);
6559 bnxt_hwrm_ring_grp_free(bp);
6560 if (irq_re_init) {
6561 bnxt_hwrm_stat_ctx_free(bp);
6562 bnxt_hwrm_free_tunnel_ports(bp);
6563 }
6564}
6565
Michael Chan39d8ba22017-07-24 12:34:22 -04006566static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode)
6567{
6568 struct hwrm_func_cfg_input req = {0};
6569 int rc;
6570
6571 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6572 req.fid = cpu_to_le16(0xffff);
6573 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE);
6574 if (br_mode == BRIDGE_MODE_VEB)
6575 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB;
6576 else if (br_mode == BRIDGE_MODE_VEPA)
6577 req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA;
6578 else
6579 return -EINVAL;
6580 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6581 if (rc)
6582 rc = -EIO;
6583 return rc;
6584}
6585
Michael Chanc3480a62018-01-17 03:21:15 -05006586static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size)
6587{
6588 struct hwrm_func_cfg_input req = {0};
6589 int rc;
6590
6591 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803)
6592 return 0;
6593
6594 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1);
6595 req.fid = cpu_to_le16(0xffff);
6596 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE);
Michael Chand4f52de02018-03-31 13:54:06 -04006597 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64;
Michael Chanc3480a62018-01-17 03:21:15 -05006598 if (size == 128)
Michael Chand4f52de02018-03-31 13:54:06 -04006599 req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128;
Michael Chanc3480a62018-01-17 03:21:15 -05006600
6601 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
6602 if (rc)
6603 rc = -EIO;
6604 return rc;
6605}
6606
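/* Set up one vnic on pre-P5 chips: allocate the RSS context(s), configure
 * the vnic and its default ring group, then enable RSS hashing and, when
 * aggregation rings are used, header-data split.
 */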
Michael Chan7b3af4f2018-10-14 07:02:54 -04006607static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
Michael Chanc0c050c2015-10-22 16:01:17 -04006608{
Michael Chanae10ae72016-12-29 12:13:38 -05006609 struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id];
Michael Chanc0c050c2015-10-22 16:01:17 -04006610 int rc;
6611
Michael Chanae10ae72016-12-29 12:13:38 -05006612 if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG)
6613 goto skip_rss_ctx;
6614
Michael Chanc0c050c2015-10-22 16:01:17 -04006615 /* allocate context for vnic */
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04006616 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 0);
Michael Chanc0c050c2015-10-22 16:01:17 -04006617 if (rc) {
6618 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
6619 vnic_id, rc);
6620 goto vnic_setup_err;
6621 }
6622 bp->rsscos_nr_ctxs++;
6623
Prashant Sreedharan94ce9ca2016-07-18 07:15:21 -04006624 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6625 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, 1);
6626 if (rc) {
6627 netdev_err(bp->dev, "hwrm vnic %d cos ctx alloc failure rc: %x\n",
6628 vnic_id, rc);
6629 goto vnic_setup_err;
6630 }
6631 bp->rsscos_nr_ctxs++;
6632 }
6633
Michael Chanae10ae72016-12-29 12:13:38 -05006634skip_rss_ctx:
Michael Chanc0c050c2015-10-22 16:01:17 -04006635 /* configure default vnic, ring grp */
6636 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6637 if (rc) {
6638 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6639 vnic_id, rc);
6640 goto vnic_setup_err;
6641 }
6642
6643 /* Enable RSS hashing on vnic */
6644 rc = bnxt_hwrm_vnic_set_rss(bp, vnic_id, true);
6645 if (rc) {
6646 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %x\n",
6647 vnic_id, rc);
6648 goto vnic_setup_err;
6649 }
6650
6651 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6652 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6653 if (rc) {
6654 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6655 vnic_id, rc);
6656 }
6657 }
6658
6659vnic_setup_err:
6660 return rc;
6661}
6662
Michael Chan7b3af4f2018-10-14 07:02:54 -04006663static int __bnxt_setup_vnic_p5(struct bnxt *bp, u16 vnic_id)
6664{
6665 int rc, i, nr_ctxs;
6666
6667 nr_ctxs = DIV_ROUND_UP(bp->rx_nr_rings, 64);
6668 for (i = 0; i < nr_ctxs; i++) {
6669 rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic_id, i);
6670 if (rc) {
6671 netdev_err(bp->dev, "hwrm vnic %d ctx %d alloc failure rc: %x\n",
6672 vnic_id, i, rc);
6673 break;
6674 }
6675 bp->rsscos_nr_ctxs++;
6676 }
6677 if (i < nr_ctxs)
6678 return -ENOMEM;
6679
6680 rc = bnxt_hwrm_vnic_set_rss_p5(bp, vnic_id, true);
6681 if (rc) {
6682 netdev_err(bp->dev, "hwrm vnic %d set rss failure rc: %d\n",
6683 vnic_id, rc);
6684 return rc;
6685 }
6686 rc = bnxt_hwrm_vnic_cfg(bp, vnic_id);
6687 if (rc) {
6688 netdev_err(bp->dev, "hwrm vnic %d cfg failure rc: %x\n",
6689 vnic_id, rc);
6690 return rc;
6691 }
6692 if (bp->flags & BNXT_FLAG_AGG_RINGS) {
6693 rc = bnxt_hwrm_vnic_set_hds(bp, vnic_id);
6694 if (rc) {
6695 netdev_err(bp->dev, "hwrm vnic %d set hds failure rc: %x\n",
6696 vnic_id, rc);
6697 }
6698 }
6699 return rc;
6700}
6701
6702static int bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id)
6703{
6704 if (bp->flags & BNXT_FLAG_CHIP_P5)
6705 return __bnxt_setup_vnic_p5(bp, vnic_id);
6706 else
6707 return __bnxt_setup_vnic(bp, vnic_id);
6708}
6709
Michael Chanc0c050c2015-10-22 16:01:17 -04006710static int bnxt_alloc_rfs_vnics(struct bnxt *bp)
6711{
6712#ifdef CONFIG_RFS_ACCEL
6713 int i, rc = 0;
6714
6715 for (i = 0; i < bp->rx_nr_rings; i++) {
Michael Chanae10ae72016-12-29 12:13:38 -05006716 struct bnxt_vnic_info *vnic;
Michael Chanc0c050c2015-10-22 16:01:17 -04006717 u16 vnic_id = i + 1;
6718 u16 ring_id = i;
6719
6720 if (vnic_id >= bp->nr_vnics)
6721 break;
6722
Michael Chanae10ae72016-12-29 12:13:38 -05006723 vnic = &bp->vnic_info[vnic_id];
6724 vnic->flags |= BNXT_VNIC_RFS_FLAG;
6725 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
6726 vnic->flags |= BNXT_VNIC_RFS_NEW_RSS_FLAG;
Michael Chanb81a90d2016-01-02 23:45:01 -05006727 rc = bnxt_hwrm_vnic_alloc(bp, vnic_id, ring_id, 1);
Michael Chanc0c050c2015-10-22 16:01:17 -04006728 if (rc) {
6729 netdev_err(bp->dev, "hwrm vnic %d alloc failure rc: %x\n",
6730 vnic_id, rc);
6731 break;
6732 }
6733 rc = bnxt_setup_vnic(bp, vnic_id);
6734 if (rc)
6735 break;
6736 }
6737 return rc;
6738#else
6739 return 0;
6740#endif
6741}
6742
Michael Chan17c71ac2016-07-01 18:46:27 -04006743/* Allow PF and VF with default VLAN to be in promiscuous mode */
6744static bool bnxt_promisc_ok(struct bnxt *bp)
6745{
6746#ifdef CONFIG_BNXT_SRIOV
6747 if (BNXT_VF(bp) && !bp->vf.vlan)
6748 return false;
6749#endif
6750 return true;
6751}
6752
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04006753static int bnxt_setup_nitroa0_vnic(struct bnxt *bp)
6754{
6755 int rc;
6756
6757 rc = bnxt_hwrm_vnic_alloc(bp, 1, bp->rx_nr_rings - 1, 1);
6758 if (rc) {
6759 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
6760 rc);
6761 return rc;
6762 }
6763
6764 rc = bnxt_hwrm_vnic_cfg(bp, 1);
6765 if (rc) {
6766 netdev_err(bp->dev, "Cannot allocate special vnic for NS2 A0: %x\n",
6767 rc);
6768 return rc;
6769 }
6770 return rc;
6771}
6772
Michael Chanb664f002015-12-02 01:54:08 -05006773static int bnxt_cfg_rx_mode(struct bnxt *);
Michael Chan7d2837d2016-05-04 16:56:44 -04006774static bool bnxt_mc_list_updated(struct bnxt *, u32 *);
Michael Chanb664f002015-12-02 01:54:08 -05006775
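/* Bring the chip to an operational state: allocate stats contexts, rings,
 * ring groups and vnics, program RSS and TPA, install the default MAC
 * filter and RX mask, and apply the interrupt coalescing settings.
 */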
Michael Chanc0c050c2015-10-22 16:01:17 -04006776static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
6777{
Michael Chan7d2837d2016-05-04 16:56:44 -04006778 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
Michael Chanc0c050c2015-10-22 16:01:17 -04006779 int rc = 0;
Prashant Sreedharan76595192016-07-18 07:15:22 -04006780 unsigned int rx_nr_rings = bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04006781
6782 if (irq_re_init) {
6783 rc = bnxt_hwrm_stat_ctx_alloc(bp);
6784 if (rc) {
6785 netdev_err(bp->dev, "hwrm stat ctx alloc failure rc: %x\n",
6786 rc);
6787 goto err_out;
6788 }
6789 }
6790
6791 rc = bnxt_hwrm_ring_alloc(bp);
6792 if (rc) {
6793 netdev_err(bp->dev, "hwrm ring alloc failure rc: %x\n", rc);
6794 goto err_out;
6795 }
6796
6797 rc = bnxt_hwrm_ring_grp_alloc(bp);
6798 if (rc) {
6799 netdev_err(bp->dev, "hwrm_ring_grp alloc failure: %x\n", rc);
6800 goto err_out;
6801 }
6802
Prashant Sreedharan76595192016-07-18 07:15:22 -04006803 if (BNXT_CHIP_TYPE_NITRO_A0(bp))
6804 rx_nr_rings--;
6805
Michael Chanc0c050c2015-10-22 16:01:17 -04006806 /* default vnic 0 */
Prashant Sreedharan76595192016-07-18 07:15:22 -04006807 rc = bnxt_hwrm_vnic_alloc(bp, 0, 0, rx_nr_rings);
Michael Chanc0c050c2015-10-22 16:01:17 -04006808 if (rc) {
6809 netdev_err(bp->dev, "hwrm vnic alloc failure rc: %x\n", rc);
6810 goto err_out;
6811 }
6812
6813 rc = bnxt_setup_vnic(bp, 0);
6814 if (rc)
6815 goto err_out;
6816
6817 if (bp->flags & BNXT_FLAG_RFS) {
6818 rc = bnxt_alloc_rfs_vnics(bp);
6819 if (rc)
6820 goto err_out;
6821 }
6822
6823 if (bp->flags & BNXT_FLAG_TPA) {
6824 rc = bnxt_set_tpa(bp, true);
6825 if (rc)
6826 goto err_out;
6827 }
6828
6829 if (BNXT_VF(bp))
6830 bnxt_update_vf_mac(bp);
6831
6832 /* Filter for default vnic 0 */
6833 rc = bnxt_hwrm_set_vnic_filter(bp, 0, 0, bp->dev->dev_addr);
6834 if (rc) {
6835 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n", rc);
6836 goto err_out;
6837 }
Michael Chan7d2837d2016-05-04 16:56:44 -04006838 vnic->uc_filter_count = 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04006839
Michael Chan30e33842018-07-09 02:24:50 -04006840 vnic->rx_mask = 0;
6841 if (bp->dev->flags & IFF_BROADCAST)
6842 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
Michael Chanc0c050c2015-10-22 16:01:17 -04006843
Michael Chan17c71ac2016-07-01 18:46:27 -04006844 if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
Michael Chan7d2837d2016-05-04 16:56:44 -04006845 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
6846
6847 if (bp->dev->flags & IFF_ALLMULTI) {
6848 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
6849 vnic->mc_list_count = 0;
6850 } else {
6851 u32 mask = 0;
6852
6853 bnxt_mc_list_updated(bp, &mask);
6854 vnic->rx_mask |= mask;
6855 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006856
Michael Chanb664f002015-12-02 01:54:08 -05006857 rc = bnxt_cfg_rx_mode(bp);
6858 if (rc)
Michael Chanc0c050c2015-10-22 16:01:17 -04006859 goto err_out;
Michael Chanc0c050c2015-10-22 16:01:17 -04006860
6861 rc = bnxt_hwrm_set_coal(bp);
6862 if (rc)
6863 netdev_warn(bp->dev, "HWRM set coalescing failure rc: %x\n",
Prashant Sreedharandc52c6c2016-07-18 07:15:24 -04006864 rc);
6865
6866 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
6867 rc = bnxt_setup_nitroa0_vnic(bp);
6868 if (rc)
6869 netdev_err(bp->dev, "Special vnic setup failure for NS2 A0 rc: %x\n",
6870 rc);
6871 }
Michael Chanc0c050c2015-10-22 16:01:17 -04006872
Michael Chancf6645f2016-06-13 02:25:28 -04006873 if (BNXT_VF(bp)) {
6874 bnxt_hwrm_func_qcfg(bp);
6875 netdev_update_features(bp->dev);
6876 }
6877
Michael Chanc0c050c2015-10-22 16:01:17 -04006878 return 0;
6879
6880err_out:
6881 bnxt_hwrm_resource_free(bp, 0, true);
6882
6883 return rc;
6884}
6885
6886static int bnxt_shutdown_nic(struct bnxt *bp, bool irq_re_init)
6887{
6888 bnxt_hwrm_resource_free(bp, 1, irq_re_init);
6889 return 0;
6890}
6891
6892static int bnxt_init_nic(struct bnxt *bp, bool irq_re_init)
6893{
Sankar Patchineelam22479252017-03-28 19:47:29 -04006894 bnxt_init_cp_rings(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006895 bnxt_init_rx_rings(bp);
6896 bnxt_init_tx_rings(bp);
6897 bnxt_init_ring_grps(bp, irq_re_init);
6898 bnxt_init_vnics(bp);
6899
6900 return bnxt_init_chip(bp, irq_re_init);
6901}
6902
Michael Chanc0c050c2015-10-22 16:01:17 -04006903static int bnxt_set_real_num_queues(struct bnxt *bp)
6904{
6905 int rc;
6906 struct net_device *dev = bp->dev;
6907
Michael Chan5f449242017-02-06 16:55:40 -05006908 rc = netif_set_real_num_tx_queues(dev, bp->tx_nr_rings -
6909 bp->tx_nr_rings_xdp);
Michael Chanc0c050c2015-10-22 16:01:17 -04006910 if (rc)
6911 return rc;
6912
6913 rc = netif_set_real_num_rx_queues(dev, bp->rx_nr_rings);
6914 if (rc)
6915 return rc;
6916
6917#ifdef CONFIG_RFS_ACCEL
Michael Chan45019a12015-12-27 18:19:22 -05006918 if (bp->flags & BNXT_FLAG_RFS)
Michael Chanc0c050c2015-10-22 16:01:17 -04006919 dev->rx_cpu_rmap = alloc_irq_cpu_rmap(bp->rx_nr_rings);
Michael Chanc0c050c2015-10-22 16:01:17 -04006920#endif
6921
6922 return rc;
6923}
6924
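/* Fit the requested RX and TX ring counts into @max completion rings.  In
 * shared-ring mode an RX/TX pair shares one completion ring; otherwise the
 * larger of the two counts is reduced until rx + tx fits.
 */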
Michael Chan6e6c5a52016-01-02 23:45:02 -05006925static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max,
6926 bool shared)
6927{
6928 int _rx = *rx, _tx = *tx;
6929
6930 if (shared) {
6931 *rx = min_t(int, _rx, max);
6932 *tx = min_t(int, _tx, max);
6933 } else {
6934 if (max < 2)
6935 return -ENOMEM;
6936
6937 while (_rx + _tx > max) {
6938 if (_rx > _tx && _rx > 1)
6939 _rx--;
6940 else if (_tx > 1)
6941 _tx--;
6942 }
6943 *rx = _rx;
6944 *tx = _tx;
6945 }
6946 return 0;
6947}
6948
Michael Chan78095922016-12-07 00:26:16 -05006949static void bnxt_setup_msix(struct bnxt *bp)
6950{
6951 const int len = sizeof(bp->irq_tbl[0].name);
6952 struct net_device *dev = bp->dev;
6953 int tcs, i;
6954
6955 tcs = netdev_get_num_tc(dev);
6956 if (tcs > 1) {
Michael Chand1e79252017-02-06 16:55:38 -05006957 int off, count;
Michael Chan78095922016-12-07 00:26:16 -05006958
Michael Chand1e79252017-02-06 16:55:38 -05006959 for (i = 0; i < tcs; i++) {
6960 count = bp->tx_nr_rings_per_tc;
6961 off = i * count;
6962 netdev_set_tc_queue(dev, i, count, off);
Michael Chan78095922016-12-07 00:26:16 -05006963 }
6964 }
6965
6966 for (i = 0; i < bp->cp_nr_rings; i++) {
Michael Chane5811b82018-03-31 13:54:18 -04006967 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
Michael Chan78095922016-12-07 00:26:16 -05006968 char *attr;
6969
6970 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
6971 attr = "TxRx";
6972 else if (i < bp->rx_nr_rings)
6973 attr = "rx";
6974 else
6975 attr = "tx";
6976
Michael Chane5811b82018-03-31 13:54:18 -04006977 snprintf(bp->irq_tbl[map_idx].name, len, "%s-%s-%d", dev->name,
6978 attr, i);
6979 bp->irq_tbl[map_idx].handler = bnxt_msix;
Michael Chan78095922016-12-07 00:26:16 -05006980 }
6981}
6982
6983static void bnxt_setup_inta(struct bnxt *bp)
6984{
6985 const int len = sizeof(bp->irq_tbl[0].name);
6986
6987 if (netdev_get_num_tc(bp->dev))
6988 netdev_reset_tc(bp->dev);
6989
6990 snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
6991 0);
6992 bp->irq_tbl[0].handler = bnxt_inta;
6993}
6994
6995static int bnxt_setup_int_mode(struct bnxt *bp)
6996{
6997 int rc;
6998
6999 if (bp->flags & BNXT_FLAG_USING_MSIX)
7000 bnxt_setup_msix(bp);
7001 else
7002 bnxt_setup_inta(bp);
7003
7004 rc = bnxt_set_real_num_queues(bp);
7005 return rc;
7006}
7007
Michael Chanb7429952017-01-13 01:32:00 -05007008#ifdef CONFIG_RFS_ACCEL
Michael Chan8079e8f2016-12-29 12:13:37 -05007009static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
7010{
Michael Chan6a4f2942018-01-17 03:21:06 -05007011 return bp->hw_resc.max_rsscos_ctxs;
Michael Chan8079e8f2016-12-29 12:13:37 -05007012}
7013
7014static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
7015{
Michael Chan6a4f2942018-01-17 03:21:06 -05007016 return bp->hw_resc.max_vnics;
Michael Chan8079e8f2016-12-29 12:13:37 -05007017}
Michael Chanb7429952017-01-13 01:32:00 -05007018#endif
Michael Chan8079e8f2016-12-29 12:13:37 -05007019
Michael Chane4060d32016-12-07 00:26:19 -05007020unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
7021{
Michael Chan6a4f2942018-01-17 03:21:06 -05007022 return bp->hw_resc.max_stat_ctxs;
Michael Chane4060d32016-12-07 00:26:19 -05007023}
7024
Michael Chana588e452016-12-07 00:26:21 -05007025void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max)
7026{
Michael Chan6a4f2942018-01-17 03:21:06 -05007027 bp->hw_resc.max_stat_ctxs = max;
Michael Chana588e452016-12-07 00:26:21 -05007028}
7029
Michael Chane4060d32016-12-07 00:26:19 -05007030unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp)
7031{
Michael Chan6a4f2942018-01-17 03:21:06 -05007032 return bp->hw_resc.max_cp_rings;
Michael Chane4060d32016-12-07 00:26:19 -05007033}
7034
Michael Chan00fe9c32018-09-03 04:23:19 -04007035unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp)
Michael Chana588e452016-12-07 00:26:21 -05007036{
Michael Chan00fe9c32018-09-03 04:23:19 -04007037 return bp->hw_resc.max_cp_rings - bnxt_get_ulp_msix_num(bp);
Michael Chana588e452016-12-07 00:26:21 -05007038}
7039
Michael Chanad95c272018-09-03 04:23:18 -04007040static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
Michael Chan78095922016-12-07 00:26:16 -05007041{
Michael Chan6a4f2942018-01-17 03:21:06 -05007042 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7043
7044 return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
Michael Chan78095922016-12-07 00:26:16 -05007045}
7046
Michael Chan30f52942018-07-09 02:24:51 -04007047static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
Michael Chan33c26572016-12-07 00:26:15 -05007048{
Michael Chan6a4f2942018-01-17 03:21:06 -05007049 bp->hw_resc.max_irqs = max_irqs;
Michael Chan33c26572016-12-07 00:26:15 -05007050}
7051
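/* Return how many MSI-X vectors can be made available to an additional
 * consumer (e.g. the RDMA ULP driver) asking for @num vectors, without
 * taking vectors away from the rings already in use.
 */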
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007052int bnxt_get_avail_msix(struct bnxt *bp, int num)
7053{
7054 int max_cp = bnxt_get_max_func_cp_rings(bp);
7055 int max_irq = bnxt_get_max_func_irqs(bp);
7056 int total_req = bp->cp_nr_rings + num;
7057 int max_idx, avail_msix;
7058
Michael Chan75720e62018-12-09 07:01:00 -05007059 max_idx = bp->total_irqs;
7060 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
7061 max_idx = min_t(int, bp->total_irqs, max_cp);
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007062 avail_msix = max_idx - bp->cp_nr_rings;
Michael Chanf1ca94d2018-08-05 16:51:53 -04007063 if (!BNXT_NEW_RM(bp) || avail_msix >= num)
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007064 return avail_msix;
7065
7066 if (max_irq < total_req) {
7067 num = max_irq - bp->cp_nr_rings;
7068 if (num <= 0)
7069 return 0;
7070 }
7071 return num;
7072}
7073
Michael Chan08654eb2018-03-31 13:54:17 -04007074static int bnxt_get_num_msix(struct bnxt *bp)
7075{
Michael Chanf1ca94d2018-08-05 16:51:53 -04007076 if (!BNXT_NEW_RM(bp))
Michael Chan08654eb2018-03-31 13:54:17 -04007077 return bnxt_get_max_func_irqs(bp);
7078
7079 return bnxt_cp_rings_in_use(bp);
7080}
7081
Michael Chan78095922016-12-07 00:26:16 -05007082static int bnxt_init_msix(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04007083{
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007084 int i, total_vecs, max, rc = 0, min = 1, ulp_msix;
Michael Chan78095922016-12-07 00:26:16 -05007085 struct msix_entry *msix_ent;
Michael Chanc0c050c2015-10-22 16:01:17 -04007086
Michael Chan08654eb2018-03-31 13:54:17 -04007087 total_vecs = bnxt_get_num_msix(bp);
7088 max = bnxt_get_max_func_irqs(bp);
7089 if (total_vecs > max)
7090 total_vecs = max;
7091
Michael Chan2773dfb2018-04-26 17:44:42 -04007092 if (!total_vecs)
7093 return 0;
7094
Michael Chanc0c050c2015-10-22 16:01:17 -04007095 msix_ent = kcalloc(total_vecs, sizeof(struct msix_entry), GFP_KERNEL);
7096 if (!msix_ent)
7097 return -ENOMEM;
7098
7099 for (i = 0; i < total_vecs; i++) {
7100 msix_ent[i].entry = i;
7101 msix_ent[i].vector = 0;
7102 }
7103
Michael Chan01657bc2016-01-02 23:45:03 -05007104 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS))
7105 min = 2;
7106
7107 total_vecs = pci_enable_msix_range(bp->pdev, msix_ent, min, total_vecs);
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007108 ulp_msix = bnxt_get_ulp_msix_num(bp);
7109 if (total_vecs < 0 || total_vecs < ulp_msix) {
Michael Chanc0c050c2015-10-22 16:01:17 -04007110 rc = -ENODEV;
7111 goto msix_setup_exit;
7112 }
7113
7114 bp->irq_tbl = kcalloc(total_vecs, sizeof(struct bnxt_irq), GFP_KERNEL);
7115 if (bp->irq_tbl) {
Michael Chan78095922016-12-07 00:26:16 -05007116 for (i = 0; i < total_vecs; i++)
7117 bp->irq_tbl[i].vector = msix_ent[i].vector;
Michael Chanc0c050c2015-10-22 16:01:17 -04007118
Michael Chan78095922016-12-07 00:26:16 -05007119 bp->total_irqs = total_vecs;
Michael Chanc0c050c2015-10-22 16:01:17 -04007120 /* Trim rings based upon num of vectors allocated */
Michael Chan6e6c5a52016-01-02 23:45:02 -05007121 rc = bnxt_trim_rings(bp, &bp->rx_nr_rings, &bp->tx_nr_rings,
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007122 total_vecs - ulp_msix, min == 1);
Michael Chan6e6c5a52016-01-02 23:45:02 -05007123 if (rc)
7124 goto msix_setup_exit;
7125
Michael Chan78095922016-12-07 00:26:16 -05007126 bp->cp_nr_rings = (min == 1) ?
7127 max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
7128 bp->tx_nr_rings + bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04007129
Michael Chanc0c050c2015-10-22 16:01:17 -04007130 } else {
7131 rc = -ENOMEM;
7132 goto msix_setup_exit;
7133 }
7134 bp->flags |= BNXT_FLAG_USING_MSIX;
7135 kfree(msix_ent);
7136 return 0;
7137
7138msix_setup_exit:
Michael Chan78095922016-12-07 00:26:16 -05007139 netdev_err(bp->dev, "bnxt_init_msix err: %x\n", rc);
7140 kfree(bp->irq_tbl);
7141 bp->irq_tbl = NULL;
Michael Chanc0c050c2015-10-22 16:01:17 -04007142 pci_disable_msix(bp->pdev);
7143 kfree(msix_ent);
7144 return rc;
7145}
7146
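/* Legacy INTA fallback: a single shared vector, so the driver runs with
 * one RX, one TX and one completion ring in shared-ring mode.
 */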
Michael Chan78095922016-12-07 00:26:16 -05007147static int bnxt_init_inta(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04007148{
Michael Chanc0c050c2015-10-22 16:01:17 -04007149 bp->irq_tbl = kcalloc(1, sizeof(struct bnxt_irq), GFP_KERNEL);
Michael Chan78095922016-12-07 00:26:16 -05007150 if (!bp->irq_tbl)
7151 return -ENOMEM;
7152
7153 bp->total_irqs = 1;
Michael Chanc0c050c2015-10-22 16:01:17 -04007154 bp->rx_nr_rings = 1;
7155 bp->tx_nr_rings = 1;
7156 bp->cp_nr_rings = 1;
Michael Chan01657bc2016-01-02 23:45:03 -05007157 bp->flags |= BNXT_FLAG_SHARED_RINGS;
Michael Chanc0c050c2015-10-22 16:01:17 -04007158 bp->irq_tbl[0].vector = bp->pdev->irq;
Michael Chan78095922016-12-07 00:26:16 -05007159 return 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04007160}
7161
Michael Chan78095922016-12-07 00:26:16 -05007162static int bnxt_init_int_mode(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04007163{
7164 int rc = 0;
7165
7166 if (bp->flags & BNXT_FLAG_MSIX_CAP)
Michael Chan78095922016-12-07 00:26:16 -05007167 rc = bnxt_init_msix(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04007168
Michael Chan1fa72e22016-04-25 02:30:49 -04007169 if (!(bp->flags & BNXT_FLAG_USING_MSIX) && BNXT_PF(bp)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04007170 /* fallback to INTA */
Michael Chan78095922016-12-07 00:26:16 -05007171 rc = bnxt_init_inta(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04007172 }
7173 return rc;
7174}
7175
Michael Chan78095922016-12-07 00:26:16 -05007176static void bnxt_clear_int_mode(struct bnxt *bp)
7177{
7178 if (bp->flags & BNXT_FLAG_USING_MSIX)
7179 pci_disable_msix(bp->pdev);
7180
7181 kfree(bp->irq_tbl);
7182 bp->irq_tbl = NULL;
7183 bp->flags &= ~BNXT_FLAG_USING_MSIX;
7184}
7185
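/* Reserve rings with the firmware.  If the reservation changes the number
 * of MSI-X vectors needed (new resource manager only), ULP IRQs are
 * quiesced while the interrupt mode is re-initialized.
 */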
Michael Chanfbcfc8e2018-03-31 13:54:20 -04007186int bnxt_reserve_rings(struct bnxt *bp)
Michael Chan674f50a2018-01-17 03:21:09 -05007187{
Michael Chan674f50a2018-01-17 03:21:09 -05007188 int tcs = netdev_get_num_tc(bp->dev);
7189 int rc;
7190
7191 if (!bnxt_need_reserve_rings(bp))
7192 return 0;
7193
7194 rc = __bnxt_reserve_rings(bp);
7195 if (rc) {
7196 netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc);
7197 return rc;
7198 }
Michael Chanf1ca94d2018-08-05 16:51:53 -04007199 if (BNXT_NEW_RM(bp) && (bnxt_get_num_msix(bp) != bp->total_irqs)) {
Michael Chanec86f142018-03-31 13:54:21 -04007200 bnxt_ulp_irq_stop(bp);
Michael Chan674f50a2018-01-17 03:21:09 -05007201 bnxt_clear_int_mode(bp);
7202 rc = bnxt_init_int_mode(bp);
Michael Chanec86f142018-03-31 13:54:21 -04007203 bnxt_ulp_irq_restart(bp, rc);
Michael Chan674f50a2018-01-17 03:21:09 -05007204 if (rc)
7205 return rc;
7206 }
7207 if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) {
7208 netdev_err(bp->dev, "tx ring reservation failure\n");
7209 netdev_reset_tc(bp->dev);
7210 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
7211 return -ENOMEM;
7212 }
7213 bp->num_stat_ctxs = bp->cp_nr_rings;
7214 return 0;
7215}
7216
Michael Chanc0c050c2015-10-22 16:01:17 -04007217static void bnxt_free_irq(struct bnxt *bp)
7218{
7219 struct bnxt_irq *irq;
7220 int i;
7221
7222#ifdef CONFIG_RFS_ACCEL
7223 free_irq_cpu_rmap(bp->dev->rx_cpu_rmap);
7224 bp->dev->rx_cpu_rmap = NULL;
7225#endif
Michael Chancb985262018-04-11 11:50:18 -04007226 if (!bp->irq_tbl || !bp->bnapi)
Michael Chanc0c050c2015-10-22 16:01:17 -04007227 return;
7228
7229 for (i = 0; i < bp->cp_nr_rings; i++) {
Michael Chane5811b82018-03-31 13:54:18 -04007230 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7231
7232 irq = &bp->irq_tbl[map_idx];
Vasundhara Volam56f0fd82017-08-28 13:40:27 -04007233 if (irq->requested) {
7234 if (irq->have_cpumask) {
7235 irq_set_affinity_hint(irq->vector, NULL);
7236 free_cpumask_var(irq->cpu_mask);
7237 irq->have_cpumask = 0;
7238 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007239 free_irq(irq->vector, bp->bnapi[i]);
Vasundhara Volam56f0fd82017-08-28 13:40:27 -04007240 }
7241
Michael Chanc0c050c2015-10-22 16:01:17 -04007242 irq->requested = 0;
7243 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007244}
7245
7246static int bnxt_request_irq(struct bnxt *bp)
7247{
Michael Chanb81a90d2016-01-02 23:45:01 -05007248 int i, j, rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04007249 unsigned long flags = 0;
7250#ifdef CONFIG_RFS_ACCEL
Michael Chane5811b82018-03-31 13:54:18 -04007251 struct cpu_rmap *rmap;
Michael Chanc0c050c2015-10-22 16:01:17 -04007252#endif
7253
Michael Chane5811b82018-03-31 13:54:18 -04007254 rc = bnxt_setup_int_mode(bp);
7255 if (rc) {
7256 netdev_err(bp->dev, "bnxt_setup_int_mode err: %x\n",
7257 rc);
7258 return rc;
7259 }
7260#ifdef CONFIG_RFS_ACCEL
7261 rmap = bp->dev->rx_cpu_rmap;
7262#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04007263 if (!(bp->flags & BNXT_FLAG_USING_MSIX))
7264 flags = IRQF_SHARED;
7265
Michael Chanb81a90d2016-01-02 23:45:01 -05007266 for (i = 0, j = 0; i < bp->cp_nr_rings; i++) {
Michael Chane5811b82018-03-31 13:54:18 -04007267 int map_idx = bnxt_cp_num_to_irq_num(bp, i);
7268 struct bnxt_irq *irq = &bp->irq_tbl[map_idx];
7269
Michael Chanc0c050c2015-10-22 16:01:17 -04007270#ifdef CONFIG_RFS_ACCEL
Michael Chanb81a90d2016-01-02 23:45:01 -05007271 if (rmap && bp->bnapi[i]->rx_ring) {
Michael Chanc0c050c2015-10-22 16:01:17 -04007272 rc = irq_cpu_rmap_add(rmap, irq->vector);
7273 if (rc)
7274 netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n",
Michael Chanb81a90d2016-01-02 23:45:01 -05007275 j);
7276 j++;
Michael Chanc0c050c2015-10-22 16:01:17 -04007277 }
7278#endif
7279 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
7280 bp->bnapi[i]);
7281 if (rc)
7282 break;
7283
7284 irq->requested = 1;
Vasundhara Volam56f0fd82017-08-28 13:40:27 -04007285
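		/* Hint the IRQ affinity so vectors are spread across CPUs
		 * local to the device's NUMA node.
		 */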
7286 if (zalloc_cpumask_var(&irq->cpu_mask, GFP_KERNEL)) {
7287 int numa_node = dev_to_node(&bp->pdev->dev);
7288
7289 irq->have_cpumask = 1;
7290 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
7291 irq->cpu_mask);
7292 rc = irq_set_affinity_hint(irq->vector, irq->cpu_mask);
7293 if (rc) {
7294 netdev_warn(bp->dev,
7295 "Set affinity failed, IRQ = %d\n",
7296 irq->vector);
7297 break;
7298 }
7299 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007300 }
7301 return rc;
7302}
7303
7304static void bnxt_del_napi(struct bnxt *bp)
7305{
7306 int i;
7307
7308 if (!bp->bnapi)
7309 return;
7310
7311 for (i = 0; i < bp->cp_nr_rings; i++) {
7312 struct bnxt_napi *bnapi = bp->bnapi[i];
7313
7314 napi_hash_del(&bnapi->napi);
7315 netif_napi_del(&bnapi->napi);
7316 }
Eric Dumazete5f6f562016-11-16 06:31:52 -08007317	/* Since we called napi_hash_del() before netif_napi_del(), we need
7318	 * to respect an RCU grace period before freeing the napi structures.
7319 */
7320 synchronize_net();
Michael Chanc0c050c2015-10-22 16:01:17 -04007321}
7322
7323static void bnxt_init_napi(struct bnxt *bp)
7324{
7325 int i;
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04007326 unsigned int cp_nr_rings = bp->cp_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04007327 struct bnxt_napi *bnapi;
7328
7329 if (bp->flags & BNXT_FLAG_USING_MSIX) {
Michael Chan0fcec982018-10-14 07:02:58 -04007330 int (*poll_fn)(struct napi_struct *, int) = bnxt_poll;
7331
7332 if (bp->flags & BNXT_FLAG_CHIP_P5)
7333 poll_fn = bnxt_poll_p5;
7334 else if (BNXT_CHIP_TYPE_NITRO_A0(bp))
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04007335 cp_nr_rings--;
7336 for (i = 0; i < cp_nr_rings; i++) {
Michael Chanc0c050c2015-10-22 16:01:17 -04007337 bnapi = bp->bnapi[i];
Michael Chan0fcec982018-10-14 07:02:58 -04007338 netif_napi_add(bp->dev, &bnapi->napi, poll_fn, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04007339 }
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04007340 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
7341 bnapi = bp->bnapi[cp_nr_rings];
7342 netif_napi_add(bp->dev, &bnapi->napi,
7343 bnxt_poll_nitroa0, 64);
Prashant Sreedharan10bbdaf2016-07-18 07:15:23 -04007344 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007345 } else {
7346 bnapi = bp->bnapi[0];
7347 netif_napi_add(bp->dev, &bnapi->napi, bnxt_poll, 64);
Michael Chanc0c050c2015-10-22 16:01:17 -04007348 }
7349}
7350
7351static void bnxt_disable_napi(struct bnxt *bp)
7352{
7353 int i;
7354
7355 if (!bp->bnapi)
7356 return;
7357
Andy Gospodarek0bc0b972018-01-26 10:27:47 -05007358 for (i = 0; i < bp->cp_nr_rings; i++) {
7359 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
7360
7361 if (bp->bnapi[i]->rx_ring)
7362 cancel_work_sync(&cpr->dim.work);
7363
Michael Chanc0c050c2015-10-22 16:01:17 -04007364 napi_disable(&bp->bnapi[i]->napi);
Andy Gospodarek0bc0b972018-01-26 10:27:47 -05007365 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007366}
7367
7368static void bnxt_enable_napi(struct bnxt *bp)
7369{
7370 int i;
7371
7372 for (i = 0; i < bp->cp_nr_rings; i++) {
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05007373 struct bnxt_cp_ring_info *cpr = &bp->bnapi[i]->cp_ring;
Michael Chanfa7e2812016-05-10 19:18:00 -04007374 bp->bnapi[i]->in_reset = false;
Andy Gospodarek6a8788f2018-01-09 16:06:20 -05007375
7376 if (bp->bnapi[i]->rx_ring) {
7377 INIT_WORK(&cpr->dim.work, bnxt_dim_work);
7378 cpr->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
7379 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007380 napi_enable(&bp->bnapi[i]->napi);
7381 }
7382}
7383
Michael Chan7df4ae92016-12-02 21:17:17 -05007384void bnxt_tx_disable(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04007385{
7386 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04007387 struct bnxt_tx_ring_info *txr;
Michael Chanc0c050c2015-10-22 16:01:17 -04007388
Michael Chanb6ab4b02016-01-02 23:44:59 -05007389 if (bp->tx_ring) {
Michael Chanc0c050c2015-10-22 16:01:17 -04007390 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05007391 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04007392 txr->dev_state = BNXT_DEV_STATE_CLOSING;
Michael Chanc0c050c2015-10-22 16:01:17 -04007393 }
7394 }
7395 /* Stop all TX queues */
7396 netif_tx_disable(bp->dev);
7397 netif_carrier_off(bp->dev);
7398}
7399
Michael Chan7df4ae92016-12-02 21:17:17 -05007400void bnxt_tx_enable(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04007401{
7402 int i;
Michael Chanc0c050c2015-10-22 16:01:17 -04007403 struct bnxt_tx_ring_info *txr;
Michael Chanc0c050c2015-10-22 16:01:17 -04007404
7405 for (i = 0; i < bp->tx_nr_rings; i++) {
Michael Chanb6ab4b02016-01-02 23:44:59 -05007406 txr = &bp->tx_ring[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04007407 txr->dev_state = 0;
7408 }
7409 netif_tx_wake_all_queues(bp->dev);
7410 if (bp->link_info.link_up)
7411 netif_carrier_on(bp->dev);
7412}
7413
7414static void bnxt_report_link(struct bnxt *bp)
7415{
7416 if (bp->link_info.link_up) {
7417 const char *duplex;
7418 const char *flow_ctrl;
Deepak Khungar38a21b32017-04-21 20:11:24 -04007419 u32 speed;
7420 u16 fec;
Michael Chanc0c050c2015-10-22 16:01:17 -04007421
7422 netif_carrier_on(bp->dev);
7423 if (bp->link_info.duplex == BNXT_LINK_DUPLEX_FULL)
7424 duplex = "full";
7425 else
7426 duplex = "half";
7427 if (bp->link_info.pause == BNXT_LINK_PAUSE_BOTH)
7428 flow_ctrl = "ON - receive & transmit";
7429 else if (bp->link_info.pause == BNXT_LINK_PAUSE_TX)
7430 flow_ctrl = "ON - transmit";
7431 else if (bp->link_info.pause == BNXT_LINK_PAUSE_RX)
7432 flow_ctrl = "ON - receive";
7433 else
7434 flow_ctrl = "none";
7435 speed = bnxt_fw_to_ethtool_speed(bp->link_info.link_speed);
Deepak Khungar38a21b32017-04-21 20:11:24 -04007436 netdev_info(bp->dev, "NIC Link is Up, %u Mbps %s duplex, Flow control: %s\n",
Michael Chanc0c050c2015-10-22 16:01:17 -04007437 speed, duplex, flow_ctrl);
Michael Chan170ce012016-04-05 14:08:57 -04007438 if (bp->flags & BNXT_FLAG_EEE_CAP)
7439 netdev_info(bp->dev, "EEE is %s\n",
7440 bp->eee.eee_active ? "active" :
7441 "not active");
Michael Chane70c7522017-02-12 19:18:16 -05007442 fec = bp->link_info.fec_cfg;
7443 if (!(fec & PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED))
7444 netdev_info(bp->dev, "FEC autoneg %s encodings: %s\n",
7445 (fec & BNXT_FEC_AUTONEG) ? "on" : "off",
7446 (fec & BNXT_FEC_ENC_BASE_R) ? "BaseR" :
7447 (fec & BNXT_FEC_ENC_RS) ? "RS" : "None");
Michael Chanc0c050c2015-10-22 16:01:17 -04007448 } else {
7449 netif_carrier_off(bp->dev);
7450 netdev_err(bp->dev, "NIC Link is Down\n");
7451 }
7452}
7453
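/* Query and cache PHY capabilities: EEE support and LPI timers, external
 * loopback support, the supported autoneg speed mask and the port count.
 */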
Michael Chan170ce012016-04-05 14:08:57 -04007454static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
7455{
7456 int rc = 0;
7457 struct hwrm_port_phy_qcaps_input req = {0};
7458 struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
Michael Chan93ed8112016-06-13 02:25:37 -04007459 struct bnxt_link_info *link_info = &bp->link_info;
Michael Chan170ce012016-04-05 14:08:57 -04007460
7461 if (bp->hwrm_spec_code < 0x10201)
7462 return 0;
7463
7464 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
7465
7466 mutex_lock(&bp->hwrm_cmd_lock);
7467 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7468 if (rc)
7469 goto hwrm_phy_qcaps_exit;
7470
Michael Chanacb20052017-07-24 12:34:20 -04007471 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EEE_SUPPORTED) {
Michael Chan170ce012016-04-05 14:08:57 -04007472 struct ethtool_eee *eee = &bp->eee;
7473 u16 fw_speeds = le16_to_cpu(resp->supported_speeds_eee_mode);
7474
7475 bp->flags |= BNXT_FLAG_EEE_CAP;
7476 eee->supported = _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7477 bp->lpi_tmr_lo = le32_to_cpu(resp->tx_lpi_timer_low) &
7478 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_LOW_MASK;
7479 bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
7480 PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
7481 }
Michael Chan55fd0cf2018-08-05 16:51:48 -04007482 if (resp->flags & PORT_PHY_QCAPS_RESP_FLAGS_EXTERNAL_LPBK_SUPPORTED) {
7483 if (bp->test_info)
7484 bp->test_info->flags |= BNXT_TEST_FL_EXT_LPBK;
7485 }
Michael Chan520ad892017-03-08 18:44:35 -05007486 if (resp->supported_speeds_auto_mode)
7487 link_info->support_auto_speeds =
7488 le16_to_cpu(resp->supported_speeds_auto_mode);
Michael Chan170ce012016-04-05 14:08:57 -04007489
Michael Chand5430d32017-08-28 13:40:31 -04007490 bp->port_count = resp->port_cnt;
7491
Michael Chan170ce012016-04-05 14:08:57 -04007492hwrm_phy_qcaps_exit:
7493 mutex_unlock(&bp->hwrm_cmd_lock);
7494 return rc;
7495}
7496
Michael Chanc0c050c2015-10-22 16:01:17 -04007497static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
7498{
7499 int rc = 0;
7500 struct bnxt_link_info *link_info = &bp->link_info;
7501 struct hwrm_port_phy_qcfg_input req = {0};
7502 struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7503 u8 link_up = link_info->link_up;
Michael Chan286ef9d2016-11-16 21:13:08 -05007504 u16 diff;
Michael Chanc0c050c2015-10-22 16:01:17 -04007505
7506 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1);
7507
7508 mutex_lock(&bp->hwrm_cmd_lock);
7509 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7510 if (rc) {
7511 mutex_unlock(&bp->hwrm_cmd_lock);
7512 return rc;
7513 }
7514
7515 memcpy(&link_info->phy_qcfg_resp, resp, sizeof(*resp));
7516 link_info->phy_link_status = resp->link;
Michael Chanacb20052017-07-24 12:34:20 -04007517 link_info->duplex = resp->duplex_cfg;
7518 if (bp->hwrm_spec_code >= 0x10800)
7519 link_info->duplex = resp->duplex_state;
Michael Chanc0c050c2015-10-22 16:01:17 -04007520 link_info->pause = resp->pause;
7521 link_info->auto_mode = resp->auto_mode;
7522 link_info->auto_pause_setting = resp->auto_pause;
Michael Chan32773602016-03-07 15:38:42 -05007523 link_info->lp_pause = resp->link_partner_adv_pause;
Michael Chanc0c050c2015-10-22 16:01:17 -04007524 link_info->force_pause_setting = resp->force_pause;
Michael Chanacb20052017-07-24 12:34:20 -04007525 link_info->duplex_setting = resp->duplex_cfg;
Michael Chanc0c050c2015-10-22 16:01:17 -04007526 if (link_info->phy_link_status == BNXT_LINK_LINK)
7527 link_info->link_speed = le16_to_cpu(resp->link_speed);
7528 else
7529 link_info->link_speed = 0;
7530 link_info->force_link_speed = le16_to_cpu(resp->force_link_speed);
Michael Chanc0c050c2015-10-22 16:01:17 -04007531 link_info->support_speeds = le16_to_cpu(resp->support_speeds);
7532 link_info->auto_link_speeds = le16_to_cpu(resp->auto_link_speed_mask);
Michael Chan32773602016-03-07 15:38:42 -05007533 link_info->lp_auto_link_speeds =
7534 le16_to_cpu(resp->link_partner_adv_speeds);
Michael Chanc0c050c2015-10-22 16:01:17 -04007535 link_info->preemphasis = le32_to_cpu(resp->preemphasis);
7536 link_info->phy_ver[0] = resp->phy_maj;
7537 link_info->phy_ver[1] = resp->phy_min;
7538 link_info->phy_ver[2] = resp->phy_bld;
7539 link_info->media_type = resp->media_type;
Michael Chan03efbec2016-04-11 04:11:11 -04007540 link_info->phy_type = resp->phy_type;
Michael Chan11f15ed2016-04-05 14:08:55 -04007541 link_info->transceiver = resp->xcvr_pkg_type;
Michael Chan170ce012016-04-05 14:08:57 -04007542 link_info->phy_addr = resp->eee_config_phy_addr &
7543 PORT_PHY_QCFG_RESP_PHY_ADDR_MASK;
Ajit Khaparde42ee18f2016-05-15 03:04:44 -04007544 link_info->module_status = resp->module_status;
Michael Chanc0c050c2015-10-22 16:01:17 -04007545
Michael Chan170ce012016-04-05 14:08:57 -04007546 if (bp->flags & BNXT_FLAG_EEE_CAP) {
7547 struct ethtool_eee *eee = &bp->eee;
7548 u16 fw_speeds;
7549
7550 eee->eee_active = 0;
7551 if (resp->eee_config_phy_addr &
7552 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ACTIVE) {
7553 eee->eee_active = 1;
7554 fw_speeds = le16_to_cpu(
7555 resp->link_partner_adv_eee_link_speed_mask);
7556 eee->lp_advertised =
7557 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7558 }
7559
7560 /* Pull initial EEE config */
7561 if (!chng_link_state) {
7562 if (resp->eee_config_phy_addr &
7563 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_ENABLED)
7564 eee->eee_enabled = 1;
7565
7566 fw_speeds = le16_to_cpu(resp->adv_eee_link_speed_mask);
7567 eee->advertised =
7568 _bnxt_fw_to_ethtool_adv_spds(fw_speeds, 0);
7569
7570 if (resp->eee_config_phy_addr &
7571 PORT_PHY_QCFG_RESP_EEE_CONFIG_EEE_TX_LPI) {
7572 __le32 tmr;
7573
7574 eee->tx_lpi_enabled = 1;
7575 tmr = resp->xcvr_identifier_type_tx_lpi_timer;
7576 eee->tx_lpi_timer = le32_to_cpu(tmr) &
7577 PORT_PHY_QCFG_RESP_TX_LPI_TIMER_MASK;
7578 }
7579 }
7580 }
Michael Chane70c7522017-02-12 19:18:16 -05007581
7582 link_info->fec_cfg = PORT_PHY_QCFG_RESP_FEC_CFG_FEC_NONE_SUPPORTED;
7583 if (bp->hwrm_spec_code >= 0x10504)
7584 link_info->fec_cfg = le16_to_cpu(resp->fec_cfg);
7585
Michael Chanc0c050c2015-10-22 16:01:17 -04007586 /* TODO: need to add more logic to report VF link */
7587 if (chng_link_state) {
7588 if (link_info->phy_link_status == BNXT_LINK_LINK)
7589 link_info->link_up = 1;
7590 else
7591 link_info->link_up = 0;
7592 if (link_up != link_info->link_up)
7593 bnxt_report_link(bp);
7594 } else {
7595		/* always link down if not required to update link state */
7596 link_info->link_up = 0;
7597 }
7598 mutex_unlock(&bp->hwrm_cmd_lock);
Michael Chan286ef9d2016-11-16 21:13:08 -05007599
Michael Chandac049072018-05-08 03:18:39 -04007600 if (!BNXT_SINGLE_PF(bp))
7601 return 0;
7602
Michael Chan286ef9d2016-11-16 21:13:08 -05007603 diff = link_info->support_auto_speeds ^ link_info->advertising;
7604 if ((link_info->support_auto_speeds | diff) !=
7605 link_info->support_auto_speeds) {
7606 /* An advertised speed is no longer supported, so we need to
Michael Chan0eaa24b2017-01-25 02:55:08 -05007607 * update the advertisement settings. Caller holds RTNL
7608 * so we can modify link settings.
Michael Chan286ef9d2016-11-16 21:13:08 -05007609 */
Michael Chan286ef9d2016-11-16 21:13:08 -05007610 link_info->advertising = link_info->support_auto_speeds;
Michael Chan0eaa24b2017-01-25 02:55:08 -05007611 if (link_info->autoneg & BNXT_AUTONEG_SPEED)
Michael Chan286ef9d2016-11-16 21:13:08 -05007612 bnxt_hwrm_set_link_setting(bp, true, false);
Michael Chan286ef9d2016-11-16 21:13:08 -05007613 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007614 return 0;
7615}
7616
Michael Chan10289be2016-05-15 03:04:49 -04007617static void bnxt_get_port_module_status(struct bnxt *bp)
7618{
7619 struct bnxt_link_info *link_info = &bp->link_info;
7620 struct hwrm_port_phy_qcfg_output *resp = &link_info->phy_qcfg_resp;
7621 u8 module_status;
7622
7623 if (bnxt_update_link(bp, true))
7624 return;
7625
7626 module_status = link_info->module_status;
7627 switch (module_status) {
7628 case PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX:
7629 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN:
7630 case PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG:
7631 netdev_warn(bp->dev, "Unqualified SFP+ module detected on port %d\n",
7632 bp->pf.port_id);
7633 if (bp->hwrm_spec_code >= 0x10201) {
7634 netdev_warn(bp->dev, "Module part number %s\n",
7635 resp->phy_vendor_partnumber);
7636 }
7637 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_DISABLETX)
7638 netdev_warn(bp->dev, "TX is disabled\n");
7639 if (module_status == PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN)
7640 netdev_warn(bp->dev, "SFP+ module is shutdown\n");
7641 }
7642}
7643
Michael Chanc0c050c2015-10-22 16:01:17 -04007644static void
7645bnxt_hwrm_set_pause_common(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req)
7646{
7647 if (bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) {
Michael Chanc9ee9512016-04-05 14:08:56 -04007648 if (bp->hwrm_spec_code >= 0x10201)
7649 req->auto_pause =
7650 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
Michael Chanc0c050c2015-10-22 16:01:17 -04007651 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7652 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_RX;
7653 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
Michael Chan49b5c7a2016-03-28 19:46:06 -04007654 req->auto_pause |= PORT_PHY_CFG_REQ_AUTO_PAUSE_TX;
Michael Chanc0c050c2015-10-22 16:01:17 -04007655 req->enables |=
7656 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7657 } else {
7658 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_RX)
7659 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_RX;
7660 if (bp->link_info.req_flow_ctrl & BNXT_LINK_PAUSE_TX)
7661 req->force_pause |= PORT_PHY_CFG_REQ_FORCE_PAUSE_TX;
7662 req->enables |=
7663 cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_FORCE_PAUSE);
Michael Chanc9ee9512016-04-05 14:08:56 -04007664 if (bp->hwrm_spec_code >= 0x10201) {
7665 req->auto_pause = req->force_pause;
7666 req->enables |= cpu_to_le32(
7667 PORT_PHY_CFG_REQ_ENABLES_AUTO_PAUSE);
7668 }
Michael Chanc0c050c2015-10-22 16:01:17 -04007669 }
7670}
7671
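/* Fill in the link portion of a PORT_PHY_CFG request: either restart
 * autoneg with the advertised speed mask, or force a fixed speed.
 */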
7672static void bnxt_hwrm_set_link_common(struct bnxt *bp,
7673 struct hwrm_port_phy_cfg_input *req)
7674{
7675 u8 autoneg = bp->link_info.autoneg;
7676 u16 fw_link_speed = bp->link_info.req_link_speed;
Michael Chan68515a12016-12-29 12:13:34 -05007677 u16 advertising = bp->link_info.advertising;
Michael Chanc0c050c2015-10-22 16:01:17 -04007678
7679 if (autoneg & BNXT_AUTONEG_SPEED) {
7680 req->auto_mode |=
Michael Chan11f15ed2016-04-05 14:08:55 -04007681 PORT_PHY_CFG_REQ_AUTO_MODE_SPEED_MASK;
Michael Chanc0c050c2015-10-22 16:01:17 -04007682
7683 req->enables |= cpu_to_le32(
7684 PORT_PHY_CFG_REQ_ENABLES_AUTO_LINK_SPEED_MASK);
7685 req->auto_link_speed_mask = cpu_to_le16(advertising);
7686
7687 req->enables |= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_AUTO_MODE);
7688 req->flags |=
7689 cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESTART_AUTONEG);
7690 } else {
7691 req->force_link_speed = cpu_to_le16(fw_link_speed);
7692 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE);
7693 }
7694
Michael Chanc0c050c2015-10-22 16:01:17 -04007695 /* tell chimp that the setting takes effect immediately */
7696 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
7697}
7698
7699int bnxt_hwrm_set_pause(struct bnxt *bp)
7700{
7701 struct hwrm_port_phy_cfg_input req = {0};
7702 int rc;
7703
7704 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
7705 bnxt_hwrm_set_pause_common(bp, &req);
7706
7707 if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) ||
7708 bp->link_info.force_link_chng)
7709 bnxt_hwrm_set_link_common(bp, &req);
7710
7711 mutex_lock(&bp->hwrm_cmd_lock);
7712 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7713 if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) {
7714		/* since changing the pause setting doesn't trigger any link
7715		 * change event, the driver needs to update the current pause
7716		 * result upon successful return of the phy_cfg command
7717 */
7718 bp->link_info.pause =
7719 bp->link_info.force_pause_setting = bp->link_info.req_flow_ctrl;
7720 bp->link_info.auto_pause_setting = 0;
7721 if (!bp->link_info.force_link_chng)
7722 bnxt_report_link(bp);
7723 }
7724 bp->link_info.force_link_chng = false;
7725 mutex_unlock(&bp->hwrm_cmd_lock);
7726 return rc;
7727}
7728
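/* Translate the cached ethtool EEE state into PORT_PHY_CFG fields:
 * EEE enable/disable, advertised EEE speeds and the TX LPI timer.
 */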
Michael Chan939f7f02016-04-05 14:08:58 -04007729static void bnxt_hwrm_set_eee(struct bnxt *bp,
7730 struct hwrm_port_phy_cfg_input *req)
7731{
7732 struct ethtool_eee *eee = &bp->eee;
7733
7734 if (eee->eee_enabled) {
7735 u16 eee_speeds;
7736 u32 flags = PORT_PHY_CFG_REQ_FLAGS_EEE_ENABLE;
7737
7738 if (eee->tx_lpi_enabled)
7739 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_ENABLE;
7740 else
7741 flags |= PORT_PHY_CFG_REQ_FLAGS_EEE_TX_LPI_DISABLE;
7742
7743 req->flags |= cpu_to_le32(flags);
7744 eee_speeds = bnxt_get_fw_auto_link_speeds(eee->advertised);
7745 req->eee_link_speed_mask = cpu_to_le16(eee_speeds);
7746 req->tx_lpi_timer = cpu_to_le32(eee->tx_lpi_timer);
7747 } else {
7748 req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_EEE_DISABLE);
7749 }
7750}
7751
7752int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee)
Michael Chanc0c050c2015-10-22 16:01:17 -04007753{
7754 struct hwrm_port_phy_cfg_input req = {0};
7755
7756 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
7757 if (set_pause)
7758 bnxt_hwrm_set_pause_common(bp, &req);
7759
7760 bnxt_hwrm_set_link_common(bp, &req);
Michael Chan939f7f02016-04-05 14:08:58 -04007761
7762 if (set_eee)
7763 bnxt_hwrm_set_eee(bp, &req);
Michael Chanc0c050c2015-10-22 16:01:17 -04007764 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7765}
7766
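/* Force the link down on close.  This is only done on a single-function
 * PF with no active VFs, since other functions may share the port.
 */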
Michael Chan33f7d552016-04-11 04:11:12 -04007767static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
7768{
7769 struct hwrm_port_phy_cfg_input req = {0};
7770
Satish Baddipadige567b2ab2016-06-13 02:25:31 -04007771 if (!BNXT_SINGLE_PF(bp))
Michael Chan33f7d552016-04-11 04:11:12 -04007772 return 0;
7773
7774 if (pci_num_vf(bp->pdev))
7775 return 0;
7776
7777 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);
Michael Chan16d663a2016-11-16 21:13:07 -05007778 req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN);
Michael Chan33f7d552016-04-11 04:11:12 -04007779 return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7780}
7781
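/* Tell the firmware that the driver interface is going up or down.  If
 * the firmware reports that resources changed while the interface was
 * down (for instance after a firmware reset), the cached reservations
 * are cleared so that everything is re-reserved on the next open.
 */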
Michael Chan25e1acd2018-08-05 16:51:55 -04007782static int bnxt_hwrm_if_change(struct bnxt *bp, bool up)
7783{
7784 struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr;
7785 struct hwrm_func_drv_if_change_input req = {0};
7786 bool resc_reinit = false;
7787 int rc;
7788
7789 if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE))
7790 return 0;
7791
7792 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1);
7793 if (up)
7794 req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP);
7795 mutex_lock(&bp->hwrm_cmd_lock);
7796 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7797 if (!rc && (resp->flags &
7798 cpu_to_le32(FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE)))
7799 resc_reinit = true;
7800 mutex_unlock(&bp->hwrm_cmd_lock);
7801
7802 if (up && resc_reinit && BNXT_NEW_RM(bp)) {
7803 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
7804
7805 rc = bnxt_hwrm_func_resc_qcaps(bp, true);
7806 hw_resc->resv_cp_rings = 0;
Michael Chan75720e62018-12-09 07:01:00 -05007807 hw_resc->resv_irqs = 0;
Michael Chan25e1acd2018-08-05 16:51:55 -04007808 hw_resc->resv_tx_rings = 0;
7809 hw_resc->resv_rx_rings = 0;
7810 hw_resc->resv_hw_ring_grps = 0;
7811 hw_resc->resv_vnics = 0;
Michael Chan6b95c3e2018-09-03 04:23:17 -04007812 bp->tx_nr_rings = 0;
7813 bp->rx_nr_rings = 0;
Michael Chan25e1acd2018-08-05 16:51:55 -04007814 }
7815 return rc;
7816}
7817
Michael Chan5ad2cbe2017-01-13 01:32:03 -05007818static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
7819{
7820 struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
7821 struct hwrm_port_led_qcaps_input req = {0};
7822 struct bnxt_pf_info *pf = &bp->pf;
7823 int rc;
7824
7825 if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
7826 return 0;
7827
7828 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
7829 req.port_id = cpu_to_le16(pf->port_id);
7830 mutex_lock(&bp->hwrm_cmd_lock);
7831 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7832 if (rc) {
7833 mutex_unlock(&bp->hwrm_cmd_lock);
7834 return rc;
7835 }
7836 if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
7837 int i;
7838
7839 bp->num_leds = resp->num_leds;
7840 memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
7841 bp->num_leds);
7842 for (i = 0; i < bp->num_leds; i++) {
7843 struct bnxt_led_info *led = &bp->leds[i];
7844 __le16 caps = led->led_state_caps;
7845
7846 if (!led->led_group_id ||
7847 !BNXT_LED_ALT_BLINK_CAP(caps)) {
7848 bp->num_leds = 0;
7849 break;
7850 }
7851 }
7852 }
7853 mutex_unlock(&bp->hwrm_cmd_lock);
7854 return 0;
7855}
7856
Michael Chan5282db62017-04-04 18:14:10 -04007857int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp)
7858{
7859 struct hwrm_wol_filter_alloc_input req = {0};
7860 struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr;
7861 int rc;
7862
7863 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1);
7864 req.port_id = cpu_to_le16(bp->pf.port_id);
7865 req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT;
7866 req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS);
7867 memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN);
7868 mutex_lock(&bp->hwrm_cmd_lock);
7869 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7870 if (!rc)
7871 bp->wol_filter_id = resp->wol_filter_id;
7872 mutex_unlock(&bp->hwrm_cmd_lock);
7873 return rc;
7874}
7875
7876int bnxt_hwrm_free_wol_fltr(struct bnxt *bp)
7877{
7878 struct hwrm_wol_filter_free_input req = {0};
7879 int rc;
7880
7881 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1);
7882 req.port_id = cpu_to_le16(bp->pf.port_id);
7883 req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID);
7884 req.wol_filter_id = bp->wol_filter_id;
7885 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7886 return rc;
7887}
7888
Michael Chanc1ef1462017-04-04 18:14:07 -04007889static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle)
7890{
7891 struct hwrm_wol_filter_qcfg_input req = {0};
7892 struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr;
7893 u16 next_handle = 0;
7894 int rc;
7895
7896 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1);
7897 req.port_id = cpu_to_le16(bp->pf.port_id);
7898 req.handle = cpu_to_le16(handle);
7899 mutex_lock(&bp->hwrm_cmd_lock);
7900 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
7901 if (!rc) {
7902 next_handle = le16_to_cpu(resp->next_handle);
7903 if (next_handle != 0) {
7904 if (resp->wol_type ==
7905 WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT) {
7906 bp->wol = 1;
7907 bp->wol_filter_id = resp->wol_filter_id;
7908 }
7909 }
7910 }
7911 mutex_unlock(&bp->hwrm_cmd_lock);
7912 return next_handle;
7913}
7914
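/* Walk the firmware's WoL filter list; each query returns the next
 * handle, and a handle of 0xffff terminates the walk.
 */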
7915static void bnxt_get_wol_settings(struct bnxt *bp)
7916{
7917 u16 handle = 0;
7918
7919 if (!BNXT_PF(bp) || !(bp->flags & BNXT_FLAG_WOL_CAP))
7920 return;
7921
7922 do {
7923 handle = bnxt_hwrm_get_wol_fltrs(bp, handle);
7924 } while (handle && handle != 0xffff);
7925}
7926
Vasundhara Volamcde49a42018-08-05 16:51:56 -04007927#ifdef CONFIG_BNXT_HWMON
7928static ssize_t bnxt_show_temp(struct device *dev,
7929 struct device_attribute *devattr, char *buf)
7930{
7931 struct hwrm_temp_monitor_query_input req = {0};
7932 struct hwrm_temp_monitor_query_output *resp;
7933 struct bnxt *bp = dev_get_drvdata(dev);
7934 u32 temp = 0;
7935
7936 resp = bp->hwrm_cmd_resp_addr;
7937 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1);
7938 mutex_lock(&bp->hwrm_cmd_lock);
7939 if (!_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT))
7940 temp = resp->temp * 1000; /* display millidegree */
7941 mutex_unlock(&bp->hwrm_cmd_lock);
7942
7943 return sprintf(buf, "%u\n", temp);
7944}
7945static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0);
7946
7947static struct attribute *bnxt_attrs[] = {
7948 &sensor_dev_attr_temp1_input.dev_attr.attr,
7949 NULL
7950};
7951ATTRIBUTE_GROUPS(bnxt);
7952
7953static void bnxt_hwmon_close(struct bnxt *bp)
7954{
7955 if (bp->hwmon_dev) {
7956 hwmon_device_unregister(bp->hwmon_dev);
7957 bp->hwmon_dev = NULL;
7958 }
7959}
7960
7961static void bnxt_hwmon_open(struct bnxt *bp)
7962{
7963 struct pci_dev *pdev = bp->pdev;
7964
7965 bp->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
7966 DRV_MODULE_NAME, bp,
7967 bnxt_groups);
7968 if (IS_ERR(bp->hwmon_dev)) {
7969 bp->hwmon_dev = NULL;
7970 dev_warn(&pdev->dev, "Cannot register hwmon device\n");
7971 }
7972}
7973#else
7974static void bnxt_hwmon_close(struct bnxt *bp)
7975{
7976}
7977
7978static void bnxt_hwmon_open(struct bnxt *bp)
7979{
7980}
7981#endif
7982
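/* Validate the EEE configuration: EEE requires autoneg, and the
 * advertised EEE speeds must be a subset of the advertised link speeds.
 */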
Michael Chan939f7f02016-04-05 14:08:58 -04007983static bool bnxt_eee_config_ok(struct bnxt *bp)
7984{
7985 struct ethtool_eee *eee = &bp->eee;
7986 struct bnxt_link_info *link_info = &bp->link_info;
7987
7988 if (!(bp->flags & BNXT_FLAG_EEE_CAP))
7989 return true;
7990
7991 if (eee->eee_enabled) {
7992 u32 advertising =
7993 _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
7994
7995 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
7996 eee->eee_enabled = 0;
7997 return false;
7998 }
7999 if (eee->advertised & ~advertising) {
8000 eee->advertised = advertising & eee->supported;
8001 return false;
8002 }
8003 }
8004 return true;
8005}
8006
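/* Compare the requested link, pause and EEE settings with the current
 * PHY state and issue PORT_PHY_CFG only if something needs to change.
 */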
Michael Chanc0c050c2015-10-22 16:01:17 -04008007static int bnxt_update_phy_setting(struct bnxt *bp)
8008{
8009 int rc;
8010 bool update_link = false;
8011 bool update_pause = false;
Michael Chan939f7f02016-04-05 14:08:58 -04008012 bool update_eee = false;
Michael Chanc0c050c2015-10-22 16:01:17 -04008013 struct bnxt_link_info *link_info = &bp->link_info;
8014
8015 rc = bnxt_update_link(bp, true);
8016 if (rc) {
8017 netdev_err(bp->dev, "failed to update link (rc: %x)\n",
8018 rc);
8019 return rc;
8020 }
Michael Chan33dac242017-02-12 19:18:15 -05008021 if (!BNXT_SINGLE_PF(bp))
8022 return 0;
8023
Michael Chanc0c050c2015-10-22 16:01:17 -04008024 if ((link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
Michael Chanc9ee9512016-04-05 14:08:56 -04008025 (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) !=
8026 link_info->req_flow_ctrl)
Michael Chanc0c050c2015-10-22 16:01:17 -04008027 update_pause = true;
8028 if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
8029 link_info->force_pause_setting != link_info->req_flow_ctrl)
8030 update_pause = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04008031 if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
8032 if (BNXT_AUTO_MODE(link_info->auto_mode))
8033 update_link = true;
8034 if (link_info->req_link_speed != link_info->force_link_speed)
8035 update_link = true;
Michael Chande730182016-02-19 19:43:20 -05008036 if (link_info->req_duplex != link_info->duplex_setting)
8037 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04008038 } else {
8039 if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
8040 update_link = true;
8041 if (link_info->advertising != link_info->auto_link_speeds)
8042 update_link = true;
Michael Chanc0c050c2015-10-22 16:01:17 -04008043 }
8044
Michael Chan16d663a2016-11-16 21:13:07 -05008045	/* The last close may have shut down the link, so we need to call
8046 * PHY_CFG to bring it back up.
8047 */
8048 if (!netif_carrier_ok(bp->dev))
8049 update_link = true;
8050
Michael Chan939f7f02016-04-05 14:08:58 -04008051 if (!bnxt_eee_config_ok(bp))
8052 update_eee = true;
8053
Michael Chanc0c050c2015-10-22 16:01:17 -04008054 if (update_link)
Michael Chan939f7f02016-04-05 14:08:58 -04008055 rc = bnxt_hwrm_set_link_setting(bp, update_pause, update_eee);
Michael Chanc0c050c2015-10-22 16:01:17 -04008056 else if (update_pause)
8057 rc = bnxt_hwrm_set_pause(bp);
8058 if (rc) {
8059 netdev_err(bp->dev, "failed to update phy setting (rc: %x)\n",
8060 rc);
8061 return rc;
8062 }
8063
8064 return rc;
8065}
8066
Jeffrey Huang11809492015-11-05 16:25:49 -05008067/* Common routine to pre-map certain register blocks to a different GRC window.
8068 * A PF has 16 4K windows and a VF has 4 4K windows. However, only 15 windows
8069 * in the PF and 3 windows in the VF can be customized to map in different
8070 * register blocks.
8071 */
8072static void bnxt_preset_reg_win(struct bnxt *bp)
8073{
8074 if (BNXT_PF(bp)) {
8075 /* CAG registers map to GRC window #4 */
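		/* The window select registers appear to be 4 bytes wide;
		 * with window #1 at offset 0, window #4 sits at offset 12.
		 */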
8076 writel(BNXT_CAG_REG_BASE,
8077 bp->bar0 + BNXT_GRCPF_REG_WINDOW_BASE_OUT + 12);
8078 }
8079}
8080
Michael Chan47558ac2018-04-26 17:44:44 -04008081static int bnxt_init_dflt_ring_mode(struct bnxt *bp);
8082
Michael Chanc0c050c2015-10-22 16:01:17 -04008083static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8084{
8085 int rc = 0;
8086
Jeffrey Huang11809492015-11-05 16:25:49 -05008087 bnxt_preset_reg_win(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008088 netif_carrier_off(bp->dev);
8089 if (irq_re_init) {
Michael Chan47558ac2018-04-26 17:44:44 -04008090 /* Reserve rings now if none were reserved at driver probe. */
8091 rc = bnxt_init_dflt_ring_mode(bp);
8092 if (rc) {
8093 netdev_err(bp->dev, "Failed to reserve default rings at open\n");
8094 return rc;
8095 }
Michael Chanc0c050c2015-10-22 16:01:17 -04008096 }
Michael Chan41e8d792018-10-14 07:02:48 -04008097 rc = bnxt_reserve_rings(bp);
8098 if (rc)
8099 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04008100 if ((bp->flags & BNXT_FLAG_RFS) &&
8101 !(bp->flags & BNXT_FLAG_USING_MSIX)) {
8102 /* disable RFS if falling back to INTA */
8103 bp->dev->hw_features &= ~NETIF_F_NTUPLE;
8104 bp->flags &= ~BNXT_FLAG_RFS;
8105 }
8106
8107 rc = bnxt_alloc_mem(bp, irq_re_init);
8108 if (rc) {
8109 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8110 goto open_err_free_mem;
8111 }
8112
8113 if (irq_re_init) {
8114 bnxt_init_napi(bp);
8115 rc = bnxt_request_irq(bp);
8116 if (rc) {
8117 netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
Vikas Guptac58387a2018-07-09 02:24:52 -04008118 goto open_err_irq;
Michael Chanc0c050c2015-10-22 16:01:17 -04008119 }
8120 }
8121
8122 bnxt_enable_napi(bp);
Andy Gospodarekcabfb092018-04-26 17:44:40 -04008123 bnxt_debug_dev_init(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008124
8125 rc = bnxt_init_nic(bp, irq_re_init);
8126 if (rc) {
8127 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8128 goto open_err;
8129 }
8130
8131 if (link_re_init) {
Michael Chane2dc9b62017-10-13 21:09:30 -04008132 mutex_lock(&bp->link_lock);
Michael Chanc0c050c2015-10-22 16:01:17 -04008133 rc = bnxt_update_phy_setting(bp);
Michael Chane2dc9b62017-10-13 21:09:30 -04008134 mutex_unlock(&bp->link_lock);
Michael Chana1ef4a72018-08-05 16:51:49 -04008135 if (rc) {
Michael Chanba41d462016-02-19 19:43:21 -05008136 netdev_warn(bp->dev, "failed to update phy settings\n");
Michael Chana1ef4a72018-08-05 16:51:49 -04008137 if (BNXT_SINGLE_PF(bp)) {
8138 bp->link_info.phy_retry = true;
8139 bp->link_info.phy_retry_expires =
8140 jiffies + 5 * HZ;
8141 }
8142 }
Michael Chanc0c050c2015-10-22 16:01:17 -04008143 }
8144
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07008145 if (irq_re_init)
Alexander Duyckad51b8e2016-06-16 12:21:19 -07008146 udp_tunnel_get_rx_info(bp->dev);
Michael Chanc0c050c2015-10-22 16:01:17 -04008147
Michael Chancaefe522015-12-09 19:35:42 -05008148 set_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04008149 bnxt_enable_int(bp);
8150 /* Enable TX queues */
8151 bnxt_tx_enable(bp);
8152 mod_timer(&bp->timer, jiffies + bp->current_interval);
Michael Chan10289be2016-05-15 03:04:49 -04008153 /* Poll link status and check for SFP+ module status */
8154 bnxt_get_port_module_status(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008155
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04008156 /* VF-reps may need to be re-opened after the PF is re-opened */
8157 if (BNXT_PF(bp))
8158 bnxt_vf_reps_open(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008159 return 0;
8160
8161open_err:
Andy Gospodarekcabfb092018-04-26 17:44:40 -04008162 bnxt_debug_dev_exit(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008163 bnxt_disable_napi(bp);
Vikas Guptac58387a2018-07-09 02:24:52 -04008164
8165open_err_irq:
Michael Chanc0c050c2015-10-22 16:01:17 -04008166 bnxt_del_napi(bp);
8167
8168open_err_free_mem:
8169 bnxt_free_skbs(bp);
8170 bnxt_free_irq(bp);
8171 bnxt_free_mem(bp, true);
8172 return rc;
8173}
8174
8175/* rtnl_lock held */
8176int bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8177{
8178 int rc = 0;
8179
8180 rc = __bnxt_open_nic(bp, irq_re_init, link_re_init);
8181 if (rc) {
8182 netdev_err(bp->dev, "nic open fail (rc: %x)\n", rc);
8183 dev_close(bp->dev);
8184 }
8185 return rc;
8186}
8187
Michael Chanf7dc1ea2017-04-04 18:14:13 -04008188/* rtnl_lock held, open the NIC halfway by allocating all resources, but
8189 * NAPI, IRQ, and TX are not enabled. This is mainly used for offline
8190 * self tests.
8191 */
8192int bnxt_half_open_nic(struct bnxt *bp)
8193{
8194 int rc = 0;
8195
8196 rc = bnxt_alloc_mem(bp, false);
8197 if (rc) {
8198 netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
8199 goto half_open_err;
8200 }
8201 rc = bnxt_init_nic(bp, false);
8202 if (rc) {
8203 netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
8204 goto half_open_err;
8205 }
8206 return 0;
8207
8208half_open_err:
8209 bnxt_free_skbs(bp);
8210 bnxt_free_mem(bp, false);
8211 dev_close(bp->dev);
8212 return rc;
8213}
8214
8215/* rtnl_lock held, this call can only be made after a previous successful
8216 * call to bnxt_half_open_nic().
8217 */
8218void bnxt_half_close_nic(struct bnxt *bp)
8219{
8220 bnxt_hwrm_resource_free(bp, false, false);
8221 bnxt_free_skbs(bp);
8222 bnxt_free_mem(bp, false);
8223}
8224
Michael Chanc0c050c2015-10-22 16:01:17 -04008225static int bnxt_open(struct net_device *dev)
8226{
8227 struct bnxt *bp = netdev_priv(dev);
Michael Chan25e1acd2018-08-05 16:51:55 -04008228 int rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04008229
Michael Chan25e1acd2018-08-05 16:51:55 -04008230 bnxt_hwrm_if_change(bp, true);
8231 rc = __bnxt_open_nic(bp, true, true);
8232 if (rc)
8233 bnxt_hwrm_if_change(bp, false);
Vasundhara Volamcde49a42018-08-05 16:51:56 -04008234
8235 bnxt_hwmon_open(bp);
8236
Michael Chan25e1acd2018-08-05 16:51:55 -04008237 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04008238}
8239
Michael Chanf9b76eb2017-07-11 13:05:34 -04008240static bool bnxt_drv_busy(struct bnxt *bp)
8241{
8242 return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) ||
8243 test_bit(BNXT_STATE_READ_STATS, &bp->state));
8244}
8245
Michael Chan86e953d2018-01-17 03:21:04 -05008246static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init,
8247 bool link_re_init)
Michael Chanc0c050c2015-10-22 16:01:17 -04008248{
Sathya Perlaee5c7fb2017-07-24 12:34:28 -04008249 /* Close the VF-reps before closing PF */
8250 if (BNXT_PF(bp))
8251 bnxt_vf_reps_close(bp);
Michael Chan86e953d2018-01-17 03:21:04 -05008252
Michael Chanc0c050c2015-10-22 16:01:17 -04008253 /* Change device state to avoid TX queue wake up's */
8254 bnxt_tx_disable(bp);
8255
Michael Chancaefe522015-12-09 19:35:42 -05008256 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chan4cebdce2015-12-09 19:35:43 -05008257 smp_mb__after_atomic();
Michael Chanf9b76eb2017-07-11 13:05:34 -04008258 while (bnxt_drv_busy(bp))
Michael Chan4cebdce2015-12-09 19:35:43 -05008259 msleep(20);
Michael Chanc0c050c2015-10-22 16:01:17 -04008260
Michael Chan9d8bc092016-12-29 12:13:33 -05008261	/* Flush rings and disable interrupts */
Michael Chanc0c050c2015-10-22 16:01:17 -04008262 bnxt_shutdown_nic(bp, irq_re_init);
8263
8264 /* TODO CHIMP_FW: Link/PHY related cleanup if (link_re_init) */
8265
Andy Gospodarekcabfb092018-04-26 17:44:40 -04008266 bnxt_debug_dev_exit(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008267 bnxt_disable_napi(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008268 del_timer_sync(&bp->timer);
8269 bnxt_free_skbs(bp);
8270
8271 if (irq_re_init) {
8272 bnxt_free_irq(bp);
8273 bnxt_del_napi(bp);
8274 }
8275 bnxt_free_mem(bp, irq_re_init);
Michael Chan86e953d2018-01-17 03:21:04 -05008276}
8277
8278int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
8279{
8280 int rc = 0;
8281
8282#ifdef CONFIG_BNXT_SRIOV
8283 if (bp->sriov_cfg) {
8284 rc = wait_event_interruptible_timeout(bp->sriov_cfg_wait,
8285 !bp->sriov_cfg,
8286 BNXT_SRIOV_CFG_WAIT_TMO);
8287 if (rc)
8288 netdev_warn(bp->dev, "timeout waiting for SRIOV config operation to complete!\n");
8289 }
8290#endif
8291 __bnxt_close_nic(bp, irq_re_init, link_re_init);
Michael Chanc0c050c2015-10-22 16:01:17 -04008292 return rc;
8293}
8294
8295static int bnxt_close(struct net_device *dev)
8296{
8297 struct bnxt *bp = netdev_priv(dev);
8298
Vasundhara Volamcde49a42018-08-05 16:51:56 -04008299 bnxt_hwmon_close(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008300 bnxt_close_nic(bp, true, true);
Michael Chan33f7d552016-04-11 04:11:12 -04008301 bnxt_hwrm_shutdown_link(bp);
Michael Chan25e1acd2018-08-05 16:51:55 -04008302 bnxt_hwrm_if_change(bp, false);
Michael Chanc0c050c2015-10-22 16:01:17 -04008303 return 0;
8304}
8305
8306/* rtnl_lock held */
8307static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8308{
8309 switch (cmd) {
8310 case SIOCGMIIPHY:
8311 /* fallthru */
8312 case SIOCGMIIREG: {
8313 if (!netif_running(dev))
8314 return -EAGAIN;
8315
8316 return 0;
8317 }
8318
8319 case SIOCSMIIREG:
8320 if (!netif_running(dev))
8321 return -EAGAIN;
8322
8323 return 0;
8324
8325 default:
8326 /* do nothing */
8327 break;
8328 }
8329 return -EOPNOTSUPP;
8330}
8331
stephen hemmingerbc1f4472017-01-06 19:12:52 -08008332static void
Michael Chanc0c050c2015-10-22 16:01:17 -04008333bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
8334{
8335 u32 i;
8336 struct bnxt *bp = netdev_priv(dev);
8337
Michael Chanf9b76eb2017-07-11 13:05:34 -04008338 set_bit(BNXT_STATE_READ_STATS, &bp->state);
8339 /* Make sure bnxt_close_nic() sees that we are reading stats before
8340 * we check the BNXT_STATE_OPEN flag.
8341 */
8342 smp_mb__after_atomic();
8343 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
8344 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
stephen hemmingerbc1f4472017-01-06 19:12:52 -08008345 return;
Michael Chanf9b76eb2017-07-11 13:05:34 -04008346 }
Michael Chanc0c050c2015-10-22 16:01:17 -04008347
8348 /* TODO check if we need to synchronize with bnxt_close path */
8349 for (i = 0; i < bp->cp_nr_rings; i++) {
8350 struct bnxt_napi *bnapi = bp->bnapi[i];
8351 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8352 struct ctx_hw_stats *hw_stats = cpr->hw_stats;
8353
8354 stats->rx_packets += le64_to_cpu(hw_stats->rx_ucast_pkts);
8355 stats->rx_packets += le64_to_cpu(hw_stats->rx_mcast_pkts);
8356 stats->rx_packets += le64_to_cpu(hw_stats->rx_bcast_pkts);
8357
8358 stats->tx_packets += le64_to_cpu(hw_stats->tx_ucast_pkts);
8359 stats->tx_packets += le64_to_cpu(hw_stats->tx_mcast_pkts);
8360 stats->tx_packets += le64_to_cpu(hw_stats->tx_bcast_pkts);
8361
8362 stats->rx_bytes += le64_to_cpu(hw_stats->rx_ucast_bytes);
8363 stats->rx_bytes += le64_to_cpu(hw_stats->rx_mcast_bytes);
8364 stats->rx_bytes += le64_to_cpu(hw_stats->rx_bcast_bytes);
8365
8366 stats->tx_bytes += le64_to_cpu(hw_stats->tx_ucast_bytes);
8367 stats->tx_bytes += le64_to_cpu(hw_stats->tx_mcast_bytes);
8368 stats->tx_bytes += le64_to_cpu(hw_stats->tx_bcast_bytes);
8369
8370 stats->rx_missed_errors +=
8371 le64_to_cpu(hw_stats->rx_discard_pkts);
8372
8373 stats->multicast += le64_to_cpu(hw_stats->rx_mcast_pkts);
8374
Michael Chanc0c050c2015-10-22 16:01:17 -04008375 stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts);
8376 }
8377
Michael Chan9947f832016-03-07 15:38:46 -05008378 if (bp->flags & BNXT_FLAG_PORT_STATS) {
8379 struct rx_port_stats *rx = bp->hw_rx_port_stats;
8380 struct tx_port_stats *tx = bp->hw_tx_port_stats;
8381
8382 stats->rx_crc_errors = le64_to_cpu(rx->rx_fcs_err_frames);
8383 stats->rx_frame_errors = le64_to_cpu(rx->rx_align_err_frames);
8384 stats->rx_length_errors = le64_to_cpu(rx->rx_undrsz_frames) +
8385 le64_to_cpu(rx->rx_ovrsz_frames) +
8386 le64_to_cpu(rx->rx_runt_frames);
8387 stats->rx_errors = le64_to_cpu(rx->rx_false_carrier_frames) +
8388 le64_to_cpu(rx->rx_jbr_frames);
8389 stats->collisions = le64_to_cpu(tx->tx_total_collisions);
8390 stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns);
8391 stats->tx_errors = le64_to_cpu(tx->tx_err);
8392 }
Michael Chanf9b76eb2017-07-11 13:05:34 -04008393 clear_bit(BNXT_STATE_READ_STATS, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04008394}
8395
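/* Sync the netdev multicast list into the vnic and report whether it
 * changed; overflowing BNXT_MAX_MC_ADDRS falls back to ALL_MCAST.
 */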
8396static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask)
8397{
8398 struct net_device *dev = bp->dev;
8399 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8400 struct netdev_hw_addr *ha;
8401 u8 *haddr;
8402 int mc_count = 0;
8403 bool update = false;
8404 int off = 0;
8405
8406 netdev_for_each_mc_addr(ha, dev) {
8407 if (mc_count >= BNXT_MAX_MC_ADDRS) {
8408 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8409 vnic->mc_list_count = 0;
8410 return false;
8411 }
8412 haddr = ha->addr;
8413 if (!ether_addr_equal(haddr, vnic->mc_list + off)) {
8414 memcpy(vnic->mc_list + off, haddr, ETH_ALEN);
8415 update = true;
8416 }
8417 off += ETH_ALEN;
8418 mc_count++;
8419 }
8420 if (mc_count)
8421 *rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_MCAST;
8422
8423 if (mc_count != vnic->mc_list_count) {
8424 vnic->mc_list_count = mc_count;
8425 update = true;
8426 }
8427 return update;
8428}
8429
8430static bool bnxt_uc_list_updated(struct bnxt *bp)
8431{
8432 struct net_device *dev = bp->dev;
8433 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8434 struct netdev_hw_addr *ha;
8435 int off = 0;
8436
8437 if (netdev_uc_count(dev) != (vnic->uc_filter_count - 1))
8438 return true;
8439
8440 netdev_for_each_uc_addr(ha, dev) {
8441 if (!ether_addr_equal(ha->addr, vnic->uc_list + off))
8442 return true;
8443
8444 off += ETH_ALEN;
8445 }
8446 return false;
8447}
8448
8449static void bnxt_set_rx_mode(struct net_device *dev)
8450{
8451 struct bnxt *bp = netdev_priv(dev);
8452 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8453 u32 mask = vnic->rx_mask;
8454 bool mc_update = false;
8455 bool uc_update;
8456
8457 if (!netif_running(dev))
8458 return;
8459
8460 mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
8461 CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
Michael Chan30e33842018-07-09 02:24:50 -04008462 CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
8463 CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
Michael Chanc0c050c2015-10-22 16:01:17 -04008464
Michael Chan17c71ac2016-07-01 18:46:27 -04008465 if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
Michael Chanc0c050c2015-10-22 16:01:17 -04008466 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8467
8468 uc_update = bnxt_uc_list_updated(bp);
8469
Michael Chan30e33842018-07-09 02:24:50 -04008470 if (dev->flags & IFF_BROADCAST)
8471 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
Michael Chanc0c050c2015-10-22 16:01:17 -04008472 if (dev->flags & IFF_ALLMULTI) {
8473 mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
8474 vnic->mc_list_count = 0;
8475 } else {
8476 mc_update = bnxt_mc_list_updated(bp, &mask);
8477 }
8478
8479 if (mask != vnic->rx_mask || uc_update || mc_update) {
8480 vnic->rx_mask = mask;
8481
8482 set_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event);
Michael Chanc213eae2017-10-13 21:09:29 -04008483 bnxt_queue_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008484 }
8485}
8486
Michael Chanb664f002015-12-02 01:54:08 -05008487static int bnxt_cfg_rx_mode(struct bnxt *bp)
Michael Chanc0c050c2015-10-22 16:01:17 -04008488{
8489 struct net_device *dev = bp->dev;
8490 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
8491 struct netdev_hw_addr *ha;
8492 int i, off = 0, rc;
8493 bool uc_update;
8494
8495 netif_addr_lock_bh(dev);
8496 uc_update = bnxt_uc_list_updated(bp);
8497 netif_addr_unlock_bh(dev);
8498
8499 if (!uc_update)
8500 goto skip_uc;
8501
8502 mutex_lock(&bp->hwrm_cmd_lock);
8503 for (i = 1; i < vnic->uc_filter_count; i++) {
8504 struct hwrm_cfa_l2_filter_free_input req = {0};
8505
8506 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1,
8507 -1);
8508
8509 req.l2_filter_id = vnic->fw_l2_filter_id[i];
8510
8511 rc = _hwrm_send_message(bp, &req, sizeof(req),
8512 HWRM_CMD_TIMEOUT);
8513 }
8514 mutex_unlock(&bp->hwrm_cmd_lock);
8515
8516 vnic->uc_filter_count = 1;
8517
8518 netif_addr_lock_bh(dev);
8519 if (netdev_uc_count(dev) > (BNXT_MAX_UC_ADDRS - 1)) {
8520 vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
8521 } else {
8522 netdev_for_each_uc_addr(ha, dev) {
8523 memcpy(vnic->uc_list + off, ha->addr, ETH_ALEN);
8524 off += ETH_ALEN;
8525 vnic->uc_filter_count++;
8526 }
8527 }
8528 netif_addr_unlock_bh(dev);
8529
8530 for (i = 1, off = 0; i < vnic->uc_filter_count; i++, off += ETH_ALEN) {
8531 rc = bnxt_hwrm_set_vnic_filter(bp, 0, i, vnic->uc_list + off);
8532 if (rc) {
8533 netdev_err(bp->dev, "HWRM vnic filter failure rc: %x\n",
8534 rc);
8535 vnic->uc_filter_count = i;
Michael Chanb664f002015-12-02 01:54:08 -05008536 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04008537 }
8538 }
8539
8540skip_uc:
8541 rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, 0);
8542 if (rc)
8543 netdev_err(bp->dev, "HWRM cfa l2 rx mask failure rc: %x\n",
8544 rc);
Michael Chanb664f002015-12-02 01:54:08 -05008545
8546 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04008547}
8548
Michael Chan2773dfb2018-04-26 17:44:42 -04008549static bool bnxt_can_reserve_rings(struct bnxt *bp)
8550{
8551#ifdef CONFIG_BNXT_SRIOV
Michael Chanf1ca94d2018-08-05 16:51:53 -04008552 if (BNXT_NEW_RM(bp) && BNXT_VF(bp)) {
Michael Chan2773dfb2018-04-26 17:44:42 -04008553 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
8554
8555 /* No minimum rings were provisioned by the PF. Don't
8556 * reserve rings by default when device is down.
8557 */
8558 if (hw_resc->min_tx_rings || hw_resc->resv_tx_rings)
8559 return true;
8560
8561 if (!netif_running(bp->dev))
8562 return false;
8563 }
8564#endif
8565 return true;
8566}
8567
Michael Chan8079e8f2016-12-29 12:13:37 -05008568/* If the chip and firmware supports RFS */
8569static bool bnxt_rfs_supported(struct bnxt *bp)
8570{
Michael Chan41e8d792018-10-14 07:02:48 -04008571 if (bp->flags & BNXT_FLAG_CHIP_P5)
8572 return false;
Michael Chan8079e8f2016-12-29 12:13:37 -05008573 if (BNXT_PF(bp) && !BNXT_CHIP_TYPE_NITRO_A0(bp))
8574 return true;
Michael Chanae10ae72016-12-29 12:13:38 -05008575 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8576 return true;
Michael Chan8079e8f2016-12-29 12:13:37 -05008577 return false;
8578}
8579
8580/* If runtime conditions support RFS */
Michael Chan2bcfa6f2015-12-27 18:19:24 -05008581static bool bnxt_rfs_capable(struct bnxt *bp)
8582{
8583#ifdef CONFIG_RFS_ACCEL
Michael Chan8079e8f2016-12-29 12:13:37 -05008584 int vnics, max_vnics, max_rss_ctxs;
Michael Chan2bcfa6f2015-12-27 18:19:24 -05008585
Michael Chan41e8d792018-10-14 07:02:48 -04008586 if (bp->flags & BNXT_FLAG_CHIP_P5)
8587 return false;
Michael Chan2773dfb2018-04-26 17:44:42 -04008588 if (!(bp->flags & BNXT_FLAG_MSIX_CAP) || !bnxt_can_reserve_rings(bp))
Michael Chan2bcfa6f2015-12-27 18:19:24 -05008589 return false;
8590
8591 vnics = 1 + bp->rx_nr_rings;
Michael Chan8079e8f2016-12-29 12:13:37 -05008592 max_vnics = bnxt_get_max_func_vnics(bp);
8593 max_rss_ctxs = bnxt_get_max_func_rss_ctxs(bp);
Michael Chanae10ae72016-12-29 12:13:38 -05008594
8595 /* RSS contexts not a limiting factor */
8596 if (bp->flags & BNXT_FLAG_NEW_RSS_CAP)
8597 max_rss_ctxs = max_vnics;
Michael Chan8079e8f2016-12-29 12:13:37 -05008598 if (vnics > max_vnics || vnics > max_rss_ctxs) {
Michael Chan6a1eef52018-01-17 03:21:10 -05008599 if (bp->rx_nr_rings > 1)
8600 netdev_warn(bp->dev,
8601 "Not enough resources to support NTUPLE filters, enough resources for up to %d rx rings\n",
8602 min(max_rss_ctxs - 1, max_vnics - 1));
Michael Chan2bcfa6f2015-12-27 18:19:24 -05008603 return false;
Vasundhara Volama2304902016-07-25 12:33:36 -04008604 }
Michael Chan2bcfa6f2015-12-27 18:19:24 -05008605
Michael Chanf1ca94d2018-08-05 16:51:53 -04008606 if (!BNXT_NEW_RM(bp))
Michael Chan6a1eef52018-01-17 03:21:10 -05008607 return true;
8608
8609 if (vnics == bp->hw_resc.resv_vnics)
8610 return true;
8611
8612 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics);
8613 if (vnics <= bp->hw_resc.resv_vnics)
8614 return true;
8615
8616 netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n");
8617 bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1);
8618 return false;
Michael Chan2bcfa6f2015-12-27 18:19:24 -05008619#else
8620 return false;
8621#endif
8622}
8623
Michael Chanc0c050c2015-10-22 16:01:17 -04008624static netdev_features_t bnxt_fix_features(struct net_device *dev,
8625 netdev_features_t features)
8626{
Michael Chan2bcfa6f2015-12-27 18:19:24 -05008627 struct bnxt *bp = netdev_priv(dev);
8628
Vasundhara Volama2304902016-07-25 12:33:36 -04008629 if ((features & NETIF_F_NTUPLE) && !bnxt_rfs_capable(bp))
Michael Chan2bcfa6f2015-12-27 18:19:24 -05008630 features &= ~NETIF_F_NTUPLE;
Michael Chan5a9f6b22016-06-06 02:37:15 -04008631
Michael Chan1054aee2017-12-16 03:09:42 -05008632 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
8633 features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
8634
8635 if (!(features & NETIF_F_GRO))
8636 features &= ~NETIF_F_GRO_HW;
8637
8638 if (features & NETIF_F_GRO_HW)
8639 features &= ~NETIF_F_LRO;
8640
	/* Both CTAG and STAG VLAN acceleration on the RX side have to be
8642 * turned on or off together.
8643 */
8644 if ((features & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) !=
8645 (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX)) {
8646 if (dev->features & NETIF_F_HW_VLAN_CTAG_RX)
8647 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8648 NETIF_F_HW_VLAN_STAG_RX);
8649 else
8650 features |= NETIF_F_HW_VLAN_CTAG_RX |
8651 NETIF_F_HW_VLAN_STAG_RX;
8652 }
Michael Chancf6645f2016-06-13 02:25:28 -04008653#ifdef CONFIG_BNXT_SRIOV
8654 if (BNXT_VF(bp)) {
8655 if (bp->vf.vlan) {
8656 features &= ~(NETIF_F_HW_VLAN_CTAG_RX |
8657 NETIF_F_HW_VLAN_STAG_RX);
8658 }
8659 }
8660#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04008661 return features;
8662}
8663
8664static int bnxt_set_features(struct net_device *dev, netdev_features_t features)
8665{
8666 struct bnxt *bp = netdev_priv(dev);
8667 u32 flags = bp->flags;
8668 u32 changes;
8669 int rc = 0;
8670 bool re_init = false;
8671 bool update_tpa = false;
8672
8673 flags &= ~BNXT_FLAG_ALL_CONFIG_FEATS;
Michael Chan1054aee2017-12-16 03:09:42 -05008674 if (features & NETIF_F_GRO_HW)
Michael Chanc0c050c2015-10-22 16:01:17 -04008675 flags |= BNXT_FLAG_GRO;
Michael Chan1054aee2017-12-16 03:09:42 -05008676 else if (features & NETIF_F_LRO)
Michael Chanc0c050c2015-10-22 16:01:17 -04008677 flags |= BNXT_FLAG_LRO;
8678
Michael Chanbdbd1eb2016-12-29 12:13:43 -05008679 if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
8680 flags &= ~BNXT_FLAG_TPA;
8681
Michael Chanc0c050c2015-10-22 16:01:17 -04008682 if (features & NETIF_F_HW_VLAN_CTAG_RX)
8683 flags |= BNXT_FLAG_STRIP_VLAN;
8684
8685 if (features & NETIF_F_NTUPLE)
8686 flags |= BNXT_FLAG_RFS;
8687
8688 changes = flags ^ bp->flags;
8689 if (changes & BNXT_FLAG_TPA) {
8690 update_tpa = true;
8691 if ((bp->flags & BNXT_FLAG_TPA) == 0 ||
8692 (flags & BNXT_FLAG_TPA) == 0)
8693 re_init = true;
8694 }
8695
8696 if (changes & ~BNXT_FLAG_TPA)
8697 re_init = true;
8698
8699 if (flags != bp->flags) {
8700 u32 old_flags = bp->flags;
8701
8702 bp->flags = flags;
8703
Michael Chan2bcfa6f2015-12-27 18:19:24 -05008704 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04008705 if (update_tpa)
8706 bnxt_set_ring_params(bp);
8707 return rc;
8708 }
8709
8710 if (re_init) {
8711 bnxt_close_nic(bp, false, false);
8712 if (update_tpa)
8713 bnxt_set_ring_params(bp);
8714
8715 return bnxt_open_nic(bp, false, false);
8716 }
8717 if (update_tpa) {
8718 rc = bnxt_set_tpa(bp,
8719 (flags & BNXT_FLAG_TPA) ?
8720 true : false);
8721 if (rc)
8722 bp->flags = old_flags;
8723 }
8724 }
8725 return rc;
8726}
8727
Michael Chanffd77622018-11-15 03:25:40 -05008728static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type,
8729 u32 ring_id, u32 *prod, u32 *cons)
8730{
8731 struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr;
8732 struct hwrm_dbg_ring_info_get_input req = {0};
8733 int rc;
8734
8735 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1);
8736 req.ring_type = ring_type;
8737 req.fw_ring_id = cpu_to_le32(ring_id);
8738 mutex_lock(&bp->hwrm_cmd_lock);
8739 rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
8740 if (!rc) {
8741 *prod = le32_to_cpu(resp->producer_index);
8742 *cons = le32_to_cpu(resp->consumer_index);
8743 }
8744 mutex_unlock(&bp->hwrm_cmd_lock);
8745 return rc;
8746}
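/* A minimal usage sketch (compile-guarded, illustration only): querying
 * the firmware producer/consumer indices of an L2 completion ring.  The
 * helper name bnxt_dbg_log_cp_ring is hypothetical; the ring type
 * constant is the one used by bnxt_chk_missed_irq() below.
 */
#if 0
static void bnxt_dbg_log_cp_ring(struct bnxt *bp,
				 struct bnxt_cp_ring_info *cpr)
{
	u32 prod, cons;

	if (!bnxt_dbg_hwrm_ring_info_get(bp,
			DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
			cpr->cp_ring_struct.fw_ring_id,
			&prod, &cons))
		netdev_info(bp->dev, "fw cp ring %d: prod %x cons %x\n",
			    cpr->cp_ring_struct.fw_ring_id, prod, cons);
}
#endif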
8747
Michael Chan9f554592016-01-02 23:44:58 -05008748static void bnxt_dump_tx_sw_state(struct bnxt_napi *bnapi)
8749{
Michael Chanb6ab4b02016-01-02 23:44:59 -05008750 struct bnxt_tx_ring_info *txr = bnapi->tx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05008751 int i = bnapi->index;
8752
Michael Chan3b2b7d92016-01-02 23:45:00 -05008753 if (!txr)
8754 return;
8755
Michael Chan9f554592016-01-02 23:44:58 -05008756 netdev_info(bnapi->bp->dev, "[%d]: tx{fw_ring: %d prod: %x cons: %x}\n",
8757 i, txr->tx_ring_struct.fw_ring_id, txr->tx_prod,
8758 txr->tx_cons);
8759}
8760
8761static void bnxt_dump_rx_sw_state(struct bnxt_napi *bnapi)
8762{
Michael Chanb6ab4b02016-01-02 23:44:59 -05008763 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring;
Michael Chan9f554592016-01-02 23:44:58 -05008764 int i = bnapi->index;
8765
Michael Chan3b2b7d92016-01-02 23:45:00 -05008766 if (!rxr)
8767 return;
8768
Michael Chan9f554592016-01-02 23:44:58 -05008769 netdev_info(bnapi->bp->dev, "[%d]: rx{fw_ring: %d prod: %x} rx_agg{fw_ring: %d agg_prod: %x sw_agg_prod: %x}\n",
8770 i, rxr->rx_ring_struct.fw_ring_id, rxr->rx_prod,
8771 rxr->rx_agg_ring_struct.fw_ring_id, rxr->rx_agg_prod,
8772 rxr->rx_sw_agg_prod);
8773}
8774
8775static void bnxt_dump_cp_sw_state(struct bnxt_napi *bnapi)
8776{
8777 struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
8778 int i = bnapi->index;
8779
8780 netdev_info(bnapi->bp->dev, "[%d]: cp{fw_ring: %d raw_cons: %x}\n",
8781 i, cpr->cp_ring_struct.fw_ring_id, cpr->cp_raw_cons);
8782}
8783
Michael Chanc0c050c2015-10-22 16:01:17 -04008784static void bnxt_dbg_dump_states(struct bnxt *bp)
8785{
8786 int i;
8787 struct bnxt_napi *bnapi;
Michael Chanc0c050c2015-10-22 16:01:17 -04008788
8789 for (i = 0; i < bp->cp_nr_rings; i++) {
8790 bnapi = bp->bnapi[i];
Michael Chanc0c050c2015-10-22 16:01:17 -04008791 if (netif_msg_drv(bp)) {
Michael Chan9f554592016-01-02 23:44:58 -05008792 bnxt_dump_tx_sw_state(bnapi);
8793 bnxt_dump_rx_sw_state(bnapi);
8794 bnxt_dump_cp_sw_state(bnapi);
Michael Chanc0c050c2015-10-22 16:01:17 -04008795 }
8796 }
8797}
8798
Michael Chan6988bd92016-06-13 02:25:29 -04008799static void bnxt_reset_task(struct bnxt *bp, bool silent)
Michael Chanc0c050c2015-10-22 16:01:17 -04008800{
Michael Chan6988bd92016-06-13 02:25:29 -04008801 if (!silent)
8802 bnxt_dbg_dump_states(bp);
Michael Chan028de142015-12-09 19:35:44 -05008803 if (netif_running(bp->dev)) {
Michael Chanb386cd32017-03-08 18:44:33 -05008804 int rc;
8805
8806 if (!silent)
8807 bnxt_ulp_stop(bp);
Michael Chan028de142015-12-09 19:35:44 -05008808 bnxt_close_nic(bp, false, false);
Michael Chanb386cd32017-03-08 18:44:33 -05008809 rc = bnxt_open_nic(bp, false, false);
8810 if (!silent && !rc)
8811 bnxt_ulp_start(bp);
Michael Chan028de142015-12-09 19:35:44 -05008812 }
Michael Chanc0c050c2015-10-22 16:01:17 -04008813}
8814
8815static void bnxt_tx_timeout(struct net_device *dev)
8816{
8817 struct bnxt *bp = netdev_priv(dev);
8818
8819 netdev_err(bp->dev, "TX timeout detected, starting reset task!\n");
8820 set_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event);
Michael Chanc213eae2017-10-13 21:09:29 -04008821 bnxt_queue_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008822}
8823
Kees Cooke99e88a2017-10-16 14:43:17 -07008824static void bnxt_timer(struct timer_list *t)
Michael Chanc0c050c2015-10-22 16:01:17 -04008825{
Kees Cooke99e88a2017-10-16 14:43:17 -07008826 struct bnxt *bp = from_timer(bp, t, timer);
Michael Chanc0c050c2015-10-22 16:01:17 -04008827 struct net_device *dev = bp->dev;
8828
8829 if (!netif_running(dev))
8830 return;
8831
8832 if (atomic_read(&bp->intr_sem) != 0)
8833 goto bnxt_restart_timer;
8834
Michael Chanadcc3312017-07-24 12:34:24 -04008835 if (bp->link_info.link_up && (bp->flags & BNXT_FLAG_PORT_STATS) &&
8836 bp->stats_coal_ticks) {
Michael Chan3bdf56c2016-03-07 15:38:45 -05008837 set_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event);
Michael Chanc213eae2017-10-13 21:09:29 -04008838 bnxt_queue_sp_work(bp);
Michael Chan3bdf56c2016-03-07 15:38:45 -05008839 }
Sathya Perla5a84acb2017-10-26 11:51:31 -04008840
8841 if (bnxt_tc_flower_enabled(bp)) {
8842 set_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event);
8843 bnxt_queue_sp_work(bp);
8844 }
Michael Chana1ef4a72018-08-05 16:51:49 -04008845
8846 if (bp->link_info.phy_retry) {
8847 if (time_after(jiffies, bp->link_info.phy_retry_expires)) {
8848 bp->link_info.phy_retry = 0;
8849 netdev_warn(bp->dev, "failed to update phy settings after maximum retries.\n");
8850 } else {
8851 set_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event);
8852 bnxt_queue_sp_work(bp);
8853 }
8854 }
Michael Chanffd77622018-11-15 03:25:40 -05008855
8856 if ((bp->flags & BNXT_FLAG_CHIP_P5) && netif_carrier_ok(dev)) {
8857 set_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event);
8858 bnxt_queue_sp_work(bp);
8859 }
Michael Chanc0c050c2015-10-22 16:01:17 -04008860bnxt_restart_timer:
8861 mod_timer(&bp->timer, jiffies + bp->current_interval);
8862}
8863
Michael Chana551ee92017-01-25 02:55:07 -05008864static void bnxt_rtnl_lock_sp(struct bnxt *bp)
Michael Chan6988bd92016-06-13 02:25:29 -04008865{
Michael Chana551ee92017-01-25 02:55:07 -05008866 /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
8867 * set. If the device is being closed, bnxt_close() may be holding
Michael Chan6988bd92016-06-13 02:25:29 -04008868 * rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
8869 * must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
8870 */
8871 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
8872 rtnl_lock();
Michael Chana551ee92017-01-25 02:55:07 -05008873}
8874
8875static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
8876{
Michael Chan6988bd92016-06-13 02:25:29 -04008877 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
8878 rtnl_unlock();
8879}
8880
Michael Chana551ee92017-01-25 02:55:07 -05008881/* Only called from bnxt_sp_task() */
8882static void bnxt_reset(struct bnxt *bp, bool silent)
8883{
8884 bnxt_rtnl_lock_sp(bp);
8885 if (test_bit(BNXT_STATE_OPEN, &bp->state))
8886 bnxt_reset_task(bp, silent);
8887 bnxt_rtnl_unlock_sp(bp);
8888}
8889
Michael Chanffd77622018-11-15 03:25:40 -05008890static void bnxt_chk_missed_irq(struct bnxt *bp)
8891{
8892 int i;
8893
8894 if (!(bp->flags & BNXT_FLAG_CHIP_P5))
8895 return;
8896
8897 for (i = 0; i < bp->cp_nr_rings; i++) {
8898 struct bnxt_napi *bnapi = bp->bnapi[i];
8899 struct bnxt_cp_ring_info *cpr;
8900 u32 fw_ring_id;
8901 int j;
8902
8903 if (!bnapi)
8904 continue;
8905
8906 cpr = &bnapi->cp_ring;
8907 for (j = 0; j < 2; j++) {
8908 struct bnxt_cp_ring_info *cpr2 = cpr->cp_ring_arr[j];
8909 u32 val[2];
8910
8911 if (!cpr2 || cpr2->has_more_work ||
8912 !bnxt_has_work(bp, cpr2))
8913 continue;
8914
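			/* If the consumer advanced since the last timer
			 * tick, NAPI is making progress; just take a new
			 * snapshot.  Otherwise the ring has pending work
			 * with no forward progress, so count it below as
			 * a likely missed interrupt.
			 */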
8915 if (cpr2->cp_raw_cons != cpr2->last_cp_raw_cons) {
8916 cpr2->last_cp_raw_cons = cpr2->cp_raw_cons;
8917 continue;
8918 }
8919 fw_ring_id = cpr2->cp_ring_struct.fw_ring_id;
8920 bnxt_dbg_hwrm_ring_info_get(bp,
8921 DBG_RING_INFO_GET_REQ_RING_TYPE_L2_CMPL,
8922 fw_ring_id, &val[0], &val[1]);
Michael Chan83eb5c52018-11-15 03:25:41 -05008923 cpr->missed_irqs++;
Michael Chanffd77622018-11-15 03:25:40 -05008924 }
8925 }
8926}
8927
Michael Chanc0c050c2015-10-22 16:01:17 -04008928static void bnxt_cfg_ntp_filters(struct bnxt *);
8929
8930static void bnxt_sp_task(struct work_struct *work)
8931{
8932 struct bnxt *bp = container_of(work, struct bnxt, sp_task);
Michael Chanc0c050c2015-10-22 16:01:17 -04008933
Michael Chan4cebdce2015-12-09 19:35:43 -05008934 set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
8935 smp_mb__after_atomic();
8936 if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
8937 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04008938 return;
Michael Chan4cebdce2015-12-09 19:35:43 -05008939 }
Michael Chanc0c050c2015-10-22 16:01:17 -04008940
8941 if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
8942 bnxt_cfg_rx_mode(bp);
8943
8944 if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
8945 bnxt_cfg_ntp_filters(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04008946 if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
8947 bnxt_hwrm_exec_fwd_req(bp);
8948 if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
8949 bnxt_hwrm_tunnel_dst_port_alloc(
8950 bp, bp->vxlan_port,
8951 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8952 }
8953 if (test_and_clear_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event)) {
8954 bnxt_hwrm_tunnel_dst_port_free(
8955 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
8956 }
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07008957 if (test_and_clear_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event)) {
8958 bnxt_hwrm_tunnel_dst_port_alloc(
8959 bp, bp->nge_port,
8960 TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8961 }
8962 if (test_and_clear_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event)) {
8963 bnxt_hwrm_tunnel_dst_port_free(
8964 bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
8965 }
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04008966 if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event)) {
Michael Chan3bdf56c2016-03-07 15:38:45 -05008967 bnxt_hwrm_port_qstats(bp);
Vasundhara Volam00db3cb2018-03-31 13:54:12 -04008968 bnxt_hwrm_port_qstats_ext(bp);
8969 }
Michael Chan3bdf56c2016-03-07 15:38:45 -05008970
Michael Chan0eaa24b2017-01-25 02:55:08 -05008971 if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
Michael Chane2dc9b62017-10-13 21:09:30 -04008972 int rc;
Michael Chan0eaa24b2017-01-25 02:55:08 -05008973
Michael Chane2dc9b62017-10-13 21:09:30 -04008974 mutex_lock(&bp->link_lock);
Michael Chan0eaa24b2017-01-25 02:55:08 -05008975 if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
8976 &bp->sp_event))
8977 bnxt_hwrm_phy_qcaps(bp);
8978
Michael Chane2dc9b62017-10-13 21:09:30 -04008979 rc = bnxt_update_link(bp, true);
8980 mutex_unlock(&bp->link_lock);
Michael Chan0eaa24b2017-01-25 02:55:08 -05008981 if (rc)
8982 netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
8983 rc);
8984 }
Michael Chana1ef4a72018-08-05 16:51:49 -04008985 if (test_and_clear_bit(BNXT_UPDATE_PHY_SP_EVENT, &bp->sp_event)) {
8986 int rc;
8987
8988 mutex_lock(&bp->link_lock);
8989 rc = bnxt_update_phy_setting(bp);
8990 mutex_unlock(&bp->link_lock);
8991 if (rc) {
8992 netdev_warn(bp->dev, "update phy settings retry failed\n");
8993 } else {
8994 bp->link_info.phy_retry = false;
8995 netdev_info(bp->dev, "update phy settings retry succeeded\n");
8996 }
8997 }
Michael Chan90c694b2017-01-25 02:55:09 -05008998 if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
Michael Chane2dc9b62017-10-13 21:09:30 -04008999 mutex_lock(&bp->link_lock);
9000 bnxt_get_port_module_status(bp);
9001 mutex_unlock(&bp->link_lock);
Michael Chan90c694b2017-01-25 02:55:09 -05009002 }
Sathya Perla5a84acb2017-10-26 11:51:31 -04009003
9004 if (test_and_clear_bit(BNXT_FLOW_STATS_SP_EVENT, &bp->sp_event))
9005 bnxt_tc_flow_stats_work(bp);
9006
Michael Chanffd77622018-11-15 03:25:40 -05009007 if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
9008 bnxt_chk_missed_irq(bp);
9009
Michael Chane2dc9b62017-10-13 21:09:30 -04009010 /* These functions below will clear BNXT_STATE_IN_SP_TASK. They
9011 * must be the last functions to be called before exiting.
9012 */
Michael Chanc0c050c2015-10-22 16:01:17 -04009013 if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
9014 bnxt_reset(bp, false);
9015
9016 if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
9017 bnxt_reset(bp, true);
9018
Michael Chanc0c050c2015-10-22 16:01:17 -04009019 smp_mb__before_atomic();
9020 clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
9021}
9022
Michael Chand1e79252017-02-06 16:55:38 -05009023/* Under rtnl_lock */
Michael Chan98fdbe72017-08-28 13:40:26 -04009024int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs,
9025 int tx_xdp)
Michael Chand1e79252017-02-06 16:55:38 -05009026{
9027 int max_rx, max_tx, tx_sets = 1;
9028 int tx_rings_needed;
Michael Chan8f23d632018-01-17 03:21:12 -05009029 int rx_rings = rx;
Eddie Wai6fc2ffd2018-03-09 23:46:04 -05009030 int cp, vnics, rc;
Michael Chand1e79252017-02-06 16:55:38 -05009031
Michael Chand1e79252017-02-06 16:55:38 -05009032 if (tcs)
9033 tx_sets = tcs;
9034
9035 rc = bnxt_get_max_rings(bp, &max_rx, &max_tx, sh);
9036 if (rc)
9037 return rc;
9038
9039 if (max_rx < rx)
9040 return -ENOMEM;
9041
Michael Chan5f449242017-02-06 16:55:40 -05009042 tx_rings_needed = tx * tx_sets + tx_xdp;
Michael Chand1e79252017-02-06 16:55:38 -05009043 if (max_tx < tx_rings_needed)
9044 return -ENOMEM;
9045
Eddie Wai6fc2ffd2018-03-09 23:46:04 -05009046 vnics = 1;
9047 if (bp->flags & BNXT_FLAG_RFS)
9048 vnics += rx_rings;
9049
Michael Chan8f23d632018-01-17 03:21:12 -05009050 if (bp->flags & BNXT_FLAG_AGG_RINGS)
9051 rx_rings <<= 1;
9052 cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx;
Michael Chanf1ca94d2018-08-05 16:51:53 -04009053 if (BNXT_NEW_RM(bp))
Michael Chan11c3ec72018-04-11 11:50:17 -04009054 cp += bnxt_get_ulp_msix_num(bp);
Eddie Wai6fc2ffd2018-03-09 23:46:04 -05009055 return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp,
9056 vnics);
Michael Chand1e79252017-02-06 16:55:38 -05009057}
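/* A worked example of the arithmetic above (illustrative numbers): for
 * tx = rx = 8, shared rings, one TC, no XDP, RFS on and aggregation
 * rings enabled, tx_rings_needed = 8, vnics = 1 + 8 = 9, rx_rings
 * doubles to 16, and cp = max(8, 8) = 8 plus any ULP MSI-X vectors
 * when the new resource manager is in use.
 */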
9058
Sathya Perla17086392017-02-20 19:25:18 -05009059static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev)
9060{
9061 if (bp->bar2) {
9062 pci_iounmap(pdev, bp->bar2);
9063 bp->bar2 = NULL;
9064 }
9065
9066 if (bp->bar1) {
9067 pci_iounmap(pdev, bp->bar1);
9068 bp->bar1 = NULL;
9069 }
9070
9071 if (bp->bar0) {
9072 pci_iounmap(pdev, bp->bar0);
9073 bp->bar0 = NULL;
9074 }
9075}
9076
9077static void bnxt_cleanup_pci(struct bnxt *bp)
9078{
9079 bnxt_unmap_bars(bp, bp->pdev);
9080 pci_release_regions(bp->pdev);
9081 pci_disable_device(bp->pdev);
9082}
9083
Michael Chan18775aa2017-10-26 11:51:27 -04009084static void bnxt_init_dflt_coal(struct bnxt *bp)
9085{
9086 struct bnxt_coal *coal;
9087
	/* Tick values in microseconds.
9089 * 1 coal_buf x bufs_per_record = 1 completion record.
9090 */
9091 coal = &bp->rx_coal;
9092 coal->coal_ticks = 14;
9093 coal->coal_bufs = 30;
9094 coal->coal_ticks_irq = 1;
9095 coal->coal_bufs_irq = 2;
Andy Gospodarek05abe4dd2018-04-26 17:44:38 -04009096 coal->idle_thresh = 50;
Michael Chan18775aa2017-10-26 11:51:27 -04009097 coal->bufs_per_record = 2;
9098 coal->budget = 64; /* NAPI budget */
9099
9100 coal = &bp->tx_coal;
9101 coal->coal_ticks = 28;
9102 coal->coal_bufs = 30;
9103 coal->coal_ticks_irq = 2;
9104 coal->coal_bufs_irq = 2;
9105 coal->bufs_per_record = 1;
9106
9107 bp->stats_coal_ticks = BNXT_DEF_STATS_COAL_TICKS;
9108}
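/* A worked example of the RX defaults above: bufs_per_record = 2 and
 * coal_bufs = 30 correspond to roughly 15 completion records, so an
 * interrupt fires after about 15 records or 14 usec, whichever the
 * hardware reaches first (approximate behavior, not a spec statement).
 */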
9109
Michael Chanc0c050c2015-10-22 16:01:17 -04009110static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
9111{
9112 int rc;
9113 struct bnxt *bp = netdev_priv(dev);
9114
9115 SET_NETDEV_DEV(dev, &pdev->dev);
9116
9117 /* enable device (incl. PCI PM wakeup), and bus-mastering */
9118 rc = pci_enable_device(pdev);
9119 if (rc) {
9120 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
9121 goto init_err;
9122 }
9123
9124 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9125 dev_err(&pdev->dev,
9126 "Cannot find PCI device base address, aborting\n");
9127 rc = -ENODEV;
9128 goto init_err_disable;
9129 }
9130
9131 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9132 if (rc) {
9133 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
9134 goto init_err_disable;
9135 }
9136
9137 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) != 0 &&
9138 dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		rc = -EIO;	/* don't let a failed DMA setup return success */
		goto init_err_disable;
9141 }
9142
9143 pci_set_master(pdev);
9144
9145 bp->dev = dev;
9146 bp->pdev = pdev;
9147
9148 bp->bar0 = pci_ioremap_bar(pdev, 0);
9149 if (!bp->bar0) {
9150 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
9151 rc = -ENOMEM;
9152 goto init_err_release;
9153 }
9154
9155 bp->bar1 = pci_ioremap_bar(pdev, 2);
9156 if (!bp->bar1) {
9157 dev_err(&pdev->dev, "Cannot map doorbell registers, aborting\n");
9158 rc = -ENOMEM;
9159 goto init_err_release;
9160 }
9161
9162 bp->bar2 = pci_ioremap_bar(pdev, 4);
9163 if (!bp->bar2) {
9164 dev_err(&pdev->dev, "Cannot map bar4 registers, aborting\n");
9165 rc = -ENOMEM;
9166 goto init_err_release;
9167 }
9168
Satish Baddipadige6316ea62016-03-07 15:38:48 -05009169 pci_enable_pcie_error_reporting(pdev);
9170
Michael Chanc0c050c2015-10-22 16:01:17 -04009171 INIT_WORK(&bp->sp_task, bnxt_sp_task);
9172
9173 spin_lock_init(&bp->ntp_fltr_lock);
Michael Chan697197e2018-10-14 07:02:46 -04009174#if BITS_PER_LONG == 32
9175 spin_lock_init(&bp->db_lock);
9176#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04009177
9178 bp->rx_ring_size = BNXT_DEFAULT_RX_RING_SIZE;
9179 bp->tx_ring_size = BNXT_DEFAULT_TX_RING_SIZE;
9180
Michael Chan18775aa2017-10-26 11:51:27 -04009181 bnxt_init_dflt_coal(bp);
Michael Chan51f30782016-07-01 18:46:29 -04009182
Kees Cooke99e88a2017-10-16 14:43:17 -07009183 timer_setup(&bp->timer, bnxt_timer, 0);
Michael Chanc0c050c2015-10-22 16:01:17 -04009184 bp->current_interval = BNXT_TIMER_INTERVAL;
9185
Michael Chancaefe522015-12-09 19:35:42 -05009186 clear_bit(BNXT_STATE_OPEN, &bp->state);
Michael Chanc0c050c2015-10-22 16:01:17 -04009187 return 0;
9188
9189init_err_release:
Sathya Perla17086392017-02-20 19:25:18 -05009190 bnxt_unmap_bars(bp, pdev);
Michael Chanc0c050c2015-10-22 16:01:17 -04009191 pci_release_regions(pdev);
9192
9193init_err_disable:
9194 pci_disable_device(pdev);
9195
9196init_err:
9197 return rc;
9198}
9199
9200/* rtnl_lock held */
9201static int bnxt_change_mac_addr(struct net_device *dev, void *p)
9202{
9203 struct sockaddr *addr = p;
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05009204 struct bnxt *bp = netdev_priv(dev);
9205 int rc = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04009206
9207 if (!is_valid_ether_addr(addr->sa_data))
9208 return -EADDRNOTAVAIL;
9209
Michael Chanc1a7bdf2017-10-26 11:51:24 -04009210 if (ether_addr_equal(addr->sa_data, dev->dev_addr))
9211 return 0;
9212
Michael Chan28ea3342018-09-14 15:41:29 -04009213 rc = bnxt_approve_mac(bp, addr->sa_data, true);
Michael Chan84c33dd2016-04-11 04:11:13 -04009214 if (rc)
9215 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04009216
Jeffrey Huang1fc2cfd2015-12-02 01:54:06 -05009217 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9218 if (netif_running(dev)) {
9219 bnxt_close_nic(bp, false, false);
9220 rc = bnxt_open_nic(bp, false, false);
9221 }
9222
9223 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04009224}
9225
9226/* rtnl_lock held */
9227static int bnxt_change_mtu(struct net_device *dev, int new_mtu)
9228{
9229 struct bnxt *bp = netdev_priv(dev);
9230
Michael Chanc0c050c2015-10-22 16:01:17 -04009231 if (netif_running(dev))
9232 bnxt_close_nic(bp, false, false);
9233
9234 dev->mtu = new_mtu;
9235 bnxt_set_ring_params(bp);
9236
9237 if (netif_running(dev))
9238 return bnxt_open_nic(bp, false, false);
9239
9240 return 0;
9241}
9242
Michael Chanc5e3deb2016-12-02 21:17:15 -05009243int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
Michael Chanc0c050c2015-10-22 16:01:17 -04009244{
9245 struct bnxt *bp = netdev_priv(dev);
Michael Chan3ffb6a32016-11-11 00:11:42 -05009246 bool sh = false;
Michael Chand1e79252017-02-06 16:55:38 -05009247 int rc;
John Fastabend16e5cc62016-02-16 21:16:43 -08009248
Michael Chanc0c050c2015-10-22 16:01:17 -04009249 if (tc > bp->max_tc) {
Michael Chanb451c8b2017-02-12 19:18:17 -05009250 netdev_err(dev, "Too many traffic classes requested: %d. Max supported is %d.\n",
Michael Chanc0c050c2015-10-22 16:01:17 -04009251 tc, bp->max_tc);
9252 return -EINVAL;
9253 }
9254
9255 if (netdev_get_num_tc(dev) == tc)
9256 return 0;
9257
Michael Chan3ffb6a32016-11-11 00:11:42 -05009258 if (bp->flags & BNXT_FLAG_SHARED_RINGS)
9259 sh = true;
9260
Michael Chan98fdbe72017-08-28 13:40:26 -04009261 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
9262 sh, tc, bp->tx_nr_rings_xdp);
Michael Chand1e79252017-02-06 16:55:38 -05009263 if (rc)
9264 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04009265
9266 /* Needs to close the device and do hw resource re-allocations */
9267 if (netif_running(bp->dev))
9268 bnxt_close_nic(bp, true, false);
9269
9270 if (tc) {
9271 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
9272 netdev_set_num_tc(dev, tc);
9273 } else {
9274 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
9275 netdev_reset_tc(dev);
9276 }
Michael Chan87e9b372017-08-23 19:34:03 -04009277 bp->tx_nr_rings += bp->tx_nr_rings_xdp;
Michael Chan3ffb6a32016-11-11 00:11:42 -05009278 bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
9279 bp->tx_nr_rings + bp->rx_nr_rings;
Michael Chanc0c050c2015-10-22 16:01:17 -04009280 bp->num_stat_ctxs = bp->cp_nr_rings;
9281
9282 if (netif_running(bp->dev))
9283 return bnxt_open_nic(bp, true, false);
9284
9285 return 0;
9286}
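/* For example (illustrative numbers): with tx_nr_rings_per_tc = 4,
 * rx_nr_rings = 4 and tc = 2, tx_nr_rings becomes 8 plus any XDP
 * rings, and with shared completion rings cp_nr_rings = max(8, 4) = 8.
 */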
9287
Jiri Pirko9e0fd152017-10-19 15:50:39 +02009288static int bnxt_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
9289 void *cb_priv)
Sathya Perla2ae74082017-08-28 13:40:33 -04009290{
Jiri Pirko9e0fd152017-10-19 15:50:39 +02009291 struct bnxt *bp = cb_priv;
Sathya Perla2ae74082017-08-28 13:40:33 -04009292
Jakub Kicinski312324f2018-01-25 14:00:48 -08009293 if (!bnxt_tc_flower_enabled(bp) ||
9294 !tc_cls_can_offload_and_chain0(bp->dev, type_data))
Sathya Perla2ae74082017-08-28 13:40:33 -04009295 return -EOPNOTSUPP;
9296
Jiri Pirko9e0fd152017-10-19 15:50:39 +02009297 switch (type) {
9298 case TC_SETUP_CLSFLOWER:
9299 return bnxt_tc_setup_flower(bp, bp->pf.fw_fid, type_data);
9300 default:
9301 return -EOPNOTSUPP;
9302 }
9303}
9304
9305static int bnxt_setup_tc_block(struct net_device *dev,
9306 struct tc_block_offload *f)
9307{
9308 struct bnxt *bp = netdev_priv(dev);
9309
9310 if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
9311 return -EOPNOTSUPP;
9312
9313 switch (f->command) {
9314 case TC_BLOCK_BIND:
9315 return tcf_block_cb_register(f->block, bnxt_setup_tc_block_cb,
John Hurley60513bd2018-06-25 14:30:04 -07009316 bp, bp, f->extack);
Jiri Pirko9e0fd152017-10-19 15:50:39 +02009317 case TC_BLOCK_UNBIND:
9318 tcf_block_cb_unregister(f->block, bnxt_setup_tc_block_cb, bp);
9319 return 0;
9320 default:
9321 return -EOPNOTSUPP;
9322 }
Sathya Perla2ae74082017-08-28 13:40:33 -04009323}
9324
Jiri Pirko2572ac52017-08-07 10:15:17 +02009325static int bnxt_setup_tc(struct net_device *dev, enum tc_setup_type type,
Jiri Pirkode4784c2017-08-07 10:15:32 +02009326 void *type_data)
Michael Chanc5e3deb2016-12-02 21:17:15 -05009327{
Sathya Perla2ae74082017-08-28 13:40:33 -04009328 switch (type) {
Jiri Pirko9e0fd152017-10-19 15:50:39 +02009329 case TC_SETUP_BLOCK:
9330 return bnxt_setup_tc_block(dev, type_data);
Nogah Frankel575ed7d2017-11-06 07:23:42 +01009331 case TC_SETUP_QDISC_MQPRIO: {
Sathya Perla2ae74082017-08-28 13:40:33 -04009332 struct tc_mqprio_qopt *mqprio = type_data;
Jiri Pirkode4784c2017-08-07 10:15:32 +02009333
Sathya Perla2ae74082017-08-28 13:40:33 -04009334 mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
9335
9336 return bnxt_setup_mq_tc(dev, mqprio->num_tc);
9337 }
9338 default:
Jiri Pirko38cf0422017-08-07 10:15:31 +02009339 return -EOPNOTSUPP;
Sathya Perla2ae74082017-08-28 13:40:33 -04009340 }
Michael Chanc5e3deb2016-12-02 21:17:15 -05009341}
9342
Michael Chanc0c050c2015-10-22 16:01:17 -04009343#ifdef CONFIG_RFS_ACCEL
9344static bool bnxt_fltr_match(struct bnxt_ntuple_filter *f1,
9345 struct bnxt_ntuple_filter *f2)
9346{
9347 struct flow_keys *keys1 = &f1->fkeys;
9348 struct flow_keys *keys2 = &f2->fkeys;
9349
9350 if (keys1->addrs.v4addrs.src == keys2->addrs.v4addrs.src &&
9351 keys1->addrs.v4addrs.dst == keys2->addrs.v4addrs.dst &&
9352 keys1->ports.ports == keys2->ports.ports &&
9353 keys1->basic.ip_proto == keys2->basic.ip_proto &&
9354 keys1->basic.n_proto == keys2->basic.n_proto &&
Michael Chan61aad722017-02-12 19:18:14 -05009355 keys1->control.flags == keys2->control.flags &&
Michael Chana54c4d72016-07-25 12:33:35 -04009356 ether_addr_equal(f1->src_mac_addr, f2->src_mac_addr) &&
9357 ether_addr_equal(f1->dst_mac_addr, f2->dst_mac_addr))
Michael Chanc0c050c2015-10-22 16:01:17 -04009358 return true;
9359
9360 return false;
9361}
9362
9363static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
9364 u16 rxq_index, u32 flow_id)
9365{
9366 struct bnxt *bp = netdev_priv(dev);
9367 struct bnxt_ntuple_filter *fltr, *new_fltr;
9368 struct flow_keys *fkeys;
9369 struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);
Michael Chana54c4d72016-07-25 12:33:35 -04009370 int rc = 0, idx, bit_id, l2_idx = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04009371 struct hlist_head *head;
9372
Michael Chana54c4d72016-07-25 12:33:35 -04009373 if (!ether_addr_equal(dev->dev_addr, eth->h_dest)) {
9374 struct bnxt_vnic_info *vnic = &bp->vnic_info[0];
9375 int off = 0, j;
9376
9377 netif_addr_lock_bh(dev);
9378 for (j = 0; j < vnic->uc_filter_count; j++, off += ETH_ALEN) {
9379 if (ether_addr_equal(eth->h_dest,
9380 vnic->uc_list + off)) {
9381 l2_idx = j + 1;
9382 break;
9383 }
9384 }
9385 netif_addr_unlock_bh(dev);
9386 if (!l2_idx)
9387 return -EINVAL;
9388 }
Michael Chanc0c050c2015-10-22 16:01:17 -04009389 new_fltr = kzalloc(sizeof(*new_fltr), GFP_ATOMIC);
9390 if (!new_fltr)
9391 return -ENOMEM;
9392
9393 fkeys = &new_fltr->fkeys;
9394 if (!skb_flow_dissect_flow_keys(skb, fkeys, 0)) {
9395 rc = -EPROTONOSUPPORT;
9396 goto err_free;
9397 }
9398
Michael Chandda0e742016-12-29 12:13:40 -05009399 if ((fkeys->basic.n_proto != htons(ETH_P_IP) &&
9400 fkeys->basic.n_proto != htons(ETH_P_IPV6)) ||
Michael Chanc0c050c2015-10-22 16:01:17 -04009401 ((fkeys->basic.ip_proto != IPPROTO_TCP) &&
9402 (fkeys->basic.ip_proto != IPPROTO_UDP))) {
9403 rc = -EPROTONOSUPPORT;
9404 goto err_free;
9405 }
Michael Chandda0e742016-12-29 12:13:40 -05009406 if (fkeys->basic.n_proto == htons(ETH_P_IPV6) &&
9407 bp->hwrm_spec_code < 0x10601) {
9408 rc = -EPROTONOSUPPORT;
9409 goto err_free;
9410 }
Michael Chan61aad722017-02-12 19:18:14 -05009411 if ((fkeys->control.flags & FLOW_DIS_ENCAPSULATION) &&
9412 bp->hwrm_spec_code < 0x10601) {
9413 rc = -EPROTONOSUPPORT;
9414 goto err_free;
9415 }
Michael Chanc0c050c2015-10-22 16:01:17 -04009416
Michael Chana54c4d72016-07-25 12:33:35 -04009417 memcpy(new_fltr->dst_mac_addr, eth->h_dest, ETH_ALEN);
Michael Chanc0c050c2015-10-22 16:01:17 -04009418 memcpy(new_fltr->src_mac_addr, eth->h_source, ETH_ALEN);
9419
9420 idx = skb_get_hash_raw(skb) & BNXT_NTP_FLTR_HASH_MASK;
9421 head = &bp->ntp_fltr_hash_tbl[idx];
9422 rcu_read_lock();
9423 hlist_for_each_entry_rcu(fltr, head, hash) {
9424 if (bnxt_fltr_match(fltr, new_fltr)) {
9425 rcu_read_unlock();
9426 rc = 0;
9427 goto err_free;
9428 }
9429 }
9430 rcu_read_unlock();
9431
9432 spin_lock_bh(&bp->ntp_fltr_lock);
Michael Chan84e86b92015-11-05 16:25:50 -05009433 bit_id = bitmap_find_free_region(bp->ntp_fltr_bmap,
9434 BNXT_NTP_FLTR_MAX_FLTR, 0);
9435 if (bit_id < 0) {
Michael Chanc0c050c2015-10-22 16:01:17 -04009436 spin_unlock_bh(&bp->ntp_fltr_lock);
9437 rc = -ENOMEM;
9438 goto err_free;
9439 }
9440
Michael Chan84e86b92015-11-05 16:25:50 -05009441 new_fltr->sw_id = (u16)bit_id;
Michael Chanc0c050c2015-10-22 16:01:17 -04009442 new_fltr->flow_id = flow_id;
Michael Chana54c4d72016-07-25 12:33:35 -04009443 new_fltr->l2_fltr_idx = l2_idx;
Michael Chanc0c050c2015-10-22 16:01:17 -04009444 new_fltr->rxq = rxq_index;
9445 hlist_add_head_rcu(&new_fltr->hash, head);
9446 bp->ntp_fltr_count++;
9447 spin_unlock_bh(&bp->ntp_fltr_lock);
9448
9449 set_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event);
Michael Chanc213eae2017-10-13 21:09:29 -04009450 bnxt_queue_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04009451
9452 return new_fltr->sw_id;
9453
9454err_free:
9455 kfree(new_fltr);
9456 return rc;
9457}
9458
9459static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9460{
9461 int i;
9462
9463 for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
9464 struct hlist_head *head;
9465 struct hlist_node *tmp;
9466 struct bnxt_ntuple_filter *fltr;
9467 int rc;
9468
9469 head = &bp->ntp_fltr_hash_tbl[i];
9470 hlist_for_each_entry_safe(fltr, tmp, head, hash) {
9471 bool del = false;
9472
9473 if (test_bit(BNXT_FLTR_VALID, &fltr->state)) {
9474 if (rps_may_expire_flow(bp->dev, fltr->rxq,
9475 fltr->flow_id,
9476 fltr->sw_id)) {
9477 bnxt_hwrm_cfa_ntuple_filter_free(bp,
9478 fltr);
9479 del = true;
9480 }
9481 } else {
9482 rc = bnxt_hwrm_cfa_ntuple_filter_alloc(bp,
9483 fltr);
9484 if (rc)
9485 del = true;
9486 else
9487 set_bit(BNXT_FLTR_VALID, &fltr->state);
9488 }
9489
9490 if (del) {
9491 spin_lock_bh(&bp->ntp_fltr_lock);
9492 hlist_del_rcu(&fltr->hash);
9493 bp->ntp_fltr_count--;
9494 spin_unlock_bh(&bp->ntp_fltr_lock);
9495 synchronize_rcu();
9496 clear_bit(fltr->sw_id, bp->ntp_fltr_bmap);
9497 kfree(fltr);
9498 }
9499 }
9500 }
Jeffrey Huang19241362016-02-26 04:00:00 -05009501 if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
		netdev_info(bp->dev, "Received PF driver unload event!\n");
Michael Chanc0c050c2015-10-22 16:01:17 -04009503}
9504
9505#else
9506
9507static void bnxt_cfg_ntp_filters(struct bnxt *bp)
9508{
9509}
9510
9511#endif /* CONFIG_RFS_ACCEL */
9512
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009513static void bnxt_udp_tunnel_add(struct net_device *dev,
9514 struct udp_tunnel_info *ti)
Michael Chanc0c050c2015-10-22 16:01:17 -04009515{
9516 struct bnxt *bp = netdev_priv(dev);
9517
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009518 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9519 return;
9520
Michael Chanc0c050c2015-10-22 16:01:17 -04009521 if (!netif_running(dev))
9522 return;
9523
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009524 switch (ti->type) {
9525 case UDP_TUNNEL_TYPE_VXLAN:
9526 if (bp->vxlan_port_cnt && bp->vxlan_port != ti->port)
9527 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04009528
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009529 bp->vxlan_port_cnt++;
9530 if (bp->vxlan_port_cnt == 1) {
9531 bp->vxlan_port = ti->port;
9532 set_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event);
Michael Chanc213eae2017-10-13 21:09:29 -04009533 bnxt_queue_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04009534 }
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009535 break;
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07009536 case UDP_TUNNEL_TYPE_GENEVE:
9537 if (bp->nge_port_cnt && bp->nge_port != ti->port)
9538 return;
9539
9540 bp->nge_port_cnt++;
9541 if (bp->nge_port_cnt == 1) {
9542 bp->nge_port = ti->port;
9543 set_bit(BNXT_GENEVE_ADD_PORT_SP_EVENT, &bp->sp_event);
9544 }
9545 break;
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009546 default:
9547 return;
Michael Chanc0c050c2015-10-22 16:01:17 -04009548 }
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009549
Michael Chanc213eae2017-10-13 21:09:29 -04009550 bnxt_queue_sp_work(bp);
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009551}
9552
9553static void bnxt_udp_tunnel_del(struct net_device *dev,
9554 struct udp_tunnel_info *ti)
9555{
9556 struct bnxt *bp = netdev_priv(dev);
9557
9558 if (ti->sa_family != AF_INET6 && ti->sa_family != AF_INET)
9559 return;
9560
9561 if (!netif_running(dev))
9562 return;
9563
9564 switch (ti->type) {
9565 case UDP_TUNNEL_TYPE_VXLAN:
9566 if (!bp->vxlan_port_cnt || bp->vxlan_port != ti->port)
9567 return;
9568 bp->vxlan_port_cnt--;
9569
9570 if (bp->vxlan_port_cnt != 0)
9571 return;
9572
9573 set_bit(BNXT_VXLAN_DEL_PORT_SP_EVENT, &bp->sp_event);
9574 break;
Alexander Duyck7cdd5fc2016-06-16 12:21:36 -07009575 case UDP_TUNNEL_TYPE_GENEVE:
9576 if (!bp->nge_port_cnt || bp->nge_port != ti->port)
9577 return;
9578 bp->nge_port_cnt--;
9579
9580 if (bp->nge_port_cnt != 0)
9581 return;
9582
9583 set_bit(BNXT_GENEVE_DEL_PORT_SP_EVENT, &bp->sp_event);
9584 break;
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009585 default:
9586 return;
9587 }
9588
Michael Chanc213eae2017-10-13 21:09:29 -04009589 bnxt_queue_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04009590}
9591
Michael Chan39d8ba22017-07-24 12:34:22 -04009592static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
9593 struct net_device *dev, u32 filter_mask,
9594 int nlflags)
9595{
9596 struct bnxt *bp = netdev_priv(dev);
9597
9598 return ndo_dflt_bridge_getlink(skb, pid, seq, dev, bp->br_mode, 0, 0,
9599 nlflags, filter_mask, NULL);
9600}
9601
9602static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
9603 u16 flags)
9604{
9605 struct bnxt *bp = netdev_priv(dev);
9606 struct nlattr *attr, *br_spec;
9607 int rem, rc = 0;
9608
9609 if (bp->hwrm_spec_code < 0x10708 || !BNXT_SINGLE_PF(bp))
9610 return -EOPNOTSUPP;
9611
9612 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
9613 if (!br_spec)
9614 return -EINVAL;
9615
9616 nla_for_each_nested(attr, br_spec, rem) {
9617 u16 mode;
9618
9619 if (nla_type(attr) != IFLA_BRIDGE_MODE)
9620 continue;
9621
9622 if (nla_len(attr) < sizeof(mode))
9623 return -EINVAL;
9624
9625 mode = nla_get_u16(attr);
9626 if (mode == bp->br_mode)
9627 break;
9628
9629 rc = bnxt_hwrm_set_br_mode(bp, mode);
9630 if (!rc)
9631 bp->br_mode = mode;
9632 break;
9633 }
9634 return rc;
9635}
9636
Sathya Perlac124a622017-07-24 12:34:29 -04009637static int bnxt_get_phys_port_name(struct net_device *dev, char *buf,
9638 size_t len)
9639{
9640 struct bnxt *bp = netdev_priv(dev);
9641 int rc;
9642
	/* The PF and its VF-reps only support the switchdev framework */
9644 if (!BNXT_PF(bp))
9645 return -EOPNOTSUPP;
9646
Sathya Perla53f70b82017-07-25 13:28:41 -04009647 rc = snprintf(buf, len, "p%d", bp->pf.port_id);
Sathya Perlac124a622017-07-24 12:34:29 -04009648
9649 if (rc >= len)
9650 return -EOPNOTSUPP;
9651 return 0;
9652}
9653
9654int bnxt_port_attr_get(struct bnxt *bp, struct switchdev_attr *attr)
9655{
9656 if (bp->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
9657 return -EOPNOTSUPP;
9658
	/* The PF and its VF-reps only support the switchdev framework */
9660 if (!BNXT_PF(bp))
9661 return -EOPNOTSUPP;
9662
9663 switch (attr->id) {
9664 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
Sathya Perladd4ea1d2018-01-17 03:21:16 -05009665 attr->u.ppid.id_len = sizeof(bp->switch_id);
9666 memcpy(attr->u.ppid.id, bp->switch_id, attr->u.ppid.id_len);
Sathya Perlac124a622017-07-24 12:34:29 -04009667 break;
9668 default:
9669 return -EOPNOTSUPP;
9670 }
9671 return 0;
9672}
9673
9674static int bnxt_swdev_port_attr_get(struct net_device *dev,
9675 struct switchdev_attr *attr)
9676{
9677 return bnxt_port_attr_get(netdev_priv(dev), attr);
9678}
9679
9680static const struct switchdev_ops bnxt_switchdev_ops = {
9681 .switchdev_port_attr_get = bnxt_swdev_port_attr_get
9682};
9683
Michael Chanc0c050c2015-10-22 16:01:17 -04009684static const struct net_device_ops bnxt_netdev_ops = {
9685 .ndo_open = bnxt_open,
9686 .ndo_start_xmit = bnxt_start_xmit,
9687 .ndo_stop = bnxt_close,
9688 .ndo_get_stats64 = bnxt_get_stats64,
9689 .ndo_set_rx_mode = bnxt_set_rx_mode,
9690 .ndo_do_ioctl = bnxt_ioctl,
9691 .ndo_validate_addr = eth_validate_addr,
9692 .ndo_set_mac_address = bnxt_change_mac_addr,
9693 .ndo_change_mtu = bnxt_change_mtu,
9694 .ndo_fix_features = bnxt_fix_features,
9695 .ndo_set_features = bnxt_set_features,
9696 .ndo_tx_timeout = bnxt_tx_timeout,
9697#ifdef CONFIG_BNXT_SRIOV
9698 .ndo_get_vf_config = bnxt_get_vf_config,
9699 .ndo_set_vf_mac = bnxt_set_vf_mac,
9700 .ndo_set_vf_vlan = bnxt_set_vf_vlan,
9701 .ndo_set_vf_rate = bnxt_set_vf_bw,
9702 .ndo_set_vf_link_state = bnxt_set_vf_link_state,
9703 .ndo_set_vf_spoofchk = bnxt_set_vf_spoofchk,
Vasundhara Volam746df132018-03-31 13:54:10 -04009704 .ndo_set_vf_trust = bnxt_set_vf_trust,
Michael Chanc0c050c2015-10-22 16:01:17 -04009705#endif
Michael Chanc0c050c2015-10-22 16:01:17 -04009706 .ndo_setup_tc = bnxt_setup_tc,
9707#ifdef CONFIG_RFS_ACCEL
9708 .ndo_rx_flow_steer = bnxt_rx_flow_steer,
9709#endif
Alexander Duyckad51b8e2016-06-16 12:21:19 -07009710 .ndo_udp_tunnel_add = bnxt_udp_tunnel_add,
9711 .ndo_udp_tunnel_del = bnxt_udp_tunnel_del,
Jakub Kicinskif4e63522017-11-03 13:56:16 -07009712 .ndo_bpf = bnxt_xdp,
Michael Chan39d8ba22017-07-24 12:34:22 -04009713 .ndo_bridge_getlink = bnxt_bridge_getlink,
9714 .ndo_bridge_setlink = bnxt_bridge_setlink,
Sathya Perlac124a622017-07-24 12:34:29 -04009715 .ndo_get_phys_port_name = bnxt_get_phys_port_name
Michael Chanc0c050c2015-10-22 16:01:17 -04009716};
9717
9718static void bnxt_remove_one(struct pci_dev *pdev)
9719{
9720 struct net_device *dev = pci_get_drvdata(pdev);
9721 struct bnxt *bp = netdev_priv(dev);
9722
Sathya Perla4ab0c6a2017-07-24 12:34:27 -04009723 if (BNXT_PF(bp)) {
Michael Chanc0c050c2015-10-22 16:01:17 -04009724 bnxt_sriov_disable(bp);
Sathya Perla4ab0c6a2017-07-24 12:34:27 -04009725 bnxt_dl_unregister(bp);
9726 }
Michael Chanc0c050c2015-10-22 16:01:17 -04009727
Satish Baddipadige6316ea62016-03-07 15:38:48 -05009728 pci_disable_pcie_error_reporting(pdev);
Michael Chanc0c050c2015-10-22 16:01:17 -04009729 unregister_netdev(dev);
Sathya Perla2ae74082017-08-28 13:40:33 -04009730 bnxt_shutdown_tc(bp);
Michael Chanc213eae2017-10-13 21:09:29 -04009731 bnxt_cancel_sp_work(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04009732 bp->sp_event = 0;
9733
Michael Chan78095922016-12-07 00:26:16 -05009734 bnxt_clear_int_mode(bp);
Jeffrey Huangbe58a0d2015-12-27 18:19:18 -05009735 bnxt_hwrm_func_drv_unrgtr(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04009736 bnxt_free_hwrm_resources(bp);
Deepak Khungare605db82017-05-29 19:06:04 -04009737 bnxt_free_hwrm_short_cmd_req(bp);
Michael Chaneb513652017-04-04 18:14:12 -04009738 bnxt_ethtool_free(bp);
Michael Chan7df4ae92016-12-02 21:17:17 -05009739 bnxt_dcb_free(bp);
Michael Chana588e452016-12-07 00:26:21 -05009740 kfree(bp->edev);
9741 bp->edev = NULL;
Michael Chan98f04cf2018-10-14 07:02:43 -04009742 bnxt_free_ctx_mem(bp);
9743 kfree(bp->ctx);
9744 bp->ctx = NULL;
Sathya Perla17086392017-02-20 19:25:18 -05009745 bnxt_cleanup_pci(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -04009746 free_netdev(dev);
Michael Chanc0c050c2015-10-22 16:01:17 -04009747}
9748
9749static int bnxt_probe_phy(struct bnxt *bp)
9750{
9751 int rc = 0;
9752 struct bnxt_link_info *link_info = &bp->link_info;
Michael Chanc0c050c2015-10-22 16:01:17 -04009753
Michael Chan170ce012016-04-05 14:08:57 -04009754 rc = bnxt_hwrm_phy_qcaps(bp);
9755 if (rc) {
9756 netdev_err(bp->dev, "Probe phy can't get phy capabilities (rc: %x)\n",
9757 rc);
9758 return rc;
9759 }
Michael Chane2dc9b62017-10-13 21:09:30 -04009760 mutex_init(&bp->link_lock);
Michael Chan170ce012016-04-05 14:08:57 -04009761
Michael Chanc0c050c2015-10-22 16:01:17 -04009762 rc = bnxt_update_link(bp, false);
9763 if (rc) {
9764 netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n",
9765 rc);
9766 return rc;
9767 }
9768
Michael Chan93ed8112016-06-13 02:25:37 -04009769 /* Older firmware does not have supported_auto_speeds, so assume
9770 * that all supported speeds can be autonegotiated.
9771 */
9772 if (link_info->auto_link_speeds && !link_info->support_auto_speeds)
9773 link_info->support_auto_speeds = link_info->support_speeds;
9774
	/* Initialize the ethtool settings copy with NVM settings */
Michael Chan0d8abf02016-02-10 17:33:47 -05009776 if (BNXT_AUTO_MODE(link_info->auto_mode)) {
Michael Chanc9ee9512016-04-05 14:08:56 -04009777 link_info->autoneg = BNXT_AUTONEG_SPEED;
9778 if (bp->hwrm_spec_code >= 0x10201) {
9779 if (link_info->auto_pause_setting &
9780 PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE)
9781 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
9782 } else {
9783 link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
9784 }
Michael Chan0d8abf02016-02-10 17:33:47 -05009785 link_info->advertising = link_info->auto_link_speeds;
Michael Chan0d8abf02016-02-10 17:33:47 -05009786 } else {
9787 link_info->req_link_speed = link_info->force_link_speed;
9788 link_info->req_duplex = link_info->duplex_setting;
Michael Chanc0c050c2015-10-22 16:01:17 -04009789 }
Michael Chanc9ee9512016-04-05 14:08:56 -04009790 if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
9791 link_info->req_flow_ctrl =
9792 link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH;
9793 else
9794 link_info->req_flow_ctrl = link_info->force_pause_setting;
Michael Chanc0c050c2015-10-22 16:01:17 -04009795 return rc;
9796}
9797
9798static int bnxt_get_max_irq(struct pci_dev *pdev)
9799{
9800 u16 ctrl;
9801
9802 if (!pdev->msix_cap)
9803 return 1;
9804
9805 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl);
9806 return (ctrl & PCI_MSIX_FLAGS_QSIZE) + 1;
9807}
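/* The MSI-X table-size field read above is encoded as N - 1 per the
 * PCI spec; e.g. a QSIZE value of 0x7f means 128 usable vectors
 * (a worked example of the decoding, not device-specific behavior).
 */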
9808
Michael Chan6e6c5a52016-01-02 23:45:02 -05009809static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
9810 int *max_cp)
Michael Chanc0c050c2015-10-22 16:01:17 -04009811{
Michael Chan6a4f2942018-01-17 03:21:06 -05009812 struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
Michael Chan6e6c5a52016-01-02 23:45:02 -05009813 int max_ring_grps = 0;
Michael Chanc0c050c2015-10-22 16:01:17 -04009814
Michael Chan6a4f2942018-01-17 03:21:06 -05009815 *max_tx = hw_resc->max_tx_rings;
9816 *max_rx = hw_resc->max_rx_rings;
Michael Chan00fe9c32018-09-03 04:23:19 -04009817 *max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
Vasundhara Volamc78fe052018-10-05 00:26:03 -04009818 hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
Michael Chan6a4f2942018-01-17 03:21:06 -05009819 *max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
9820 max_ring_grps = hw_resc->max_hw_ring_grps;
Prashant Sreedharan76595192016-07-18 07:15:22 -04009821 if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
9822 *max_cp -= 1;
9823 *max_rx -= 2;
9824 }
Michael Chanc0c050c2015-10-22 16:01:17 -04009825 if (bp->flags & BNXT_FLAG_AGG_RINGS)
9826 *max_rx >>= 1;
Michael Chanb72d4a62015-12-27 18:19:27 -05009827 *max_rx = min_t(int, *max_rx, max_ring_grps);
Michael Chan6e6c5a52016-01-02 23:45:02 -05009828}
9829
9830int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
9831{
9832 int rx, tx, cp;
9833
9834 _bnxt_get_max_rings(bp, &rx, &tx, &cp);
Michael Chan78f058a2018-07-09 02:24:49 -04009835 *max_rx = rx;
9836 *max_tx = tx;
Michael Chan6e6c5a52016-01-02 23:45:02 -05009837 if (!rx || !tx || !cp)
9838 return -ENOMEM;
9839
Michael Chan6e6c5a52016-01-02 23:45:02 -05009840 return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
9841}
9842
Michael Chane4060d32016-12-07 00:26:19 -05009843static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
9844 bool shared)
9845{
9846 int rc;
9847
9848 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
Michael Chanbdbd1eb2016-12-29 12:13:43 -05009849 if (rc && (bp->flags & BNXT_FLAG_AGG_RINGS)) {
9850 /* Not enough rings, try disabling agg rings. */
9851 bp->flags &= ~BNXT_FLAG_AGG_RINGS;
9852 rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
Michael Chan07f4fde2018-07-09 02:24:48 -04009853 if (rc) {
9854 /* set BNXT_FLAG_AGG_RINGS back for consistency */
9855 bp->flags |= BNXT_FLAG_AGG_RINGS;
Michael Chanbdbd1eb2016-12-29 12:13:43 -05009856 return rc;
Michael Chan07f4fde2018-07-09 02:24:48 -04009857 }
Michael Chanbdbd1eb2016-12-29 12:13:43 -05009858 bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
Michael Chan1054aee2017-12-16 03:09:42 -05009859 bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
9860 bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
Michael Chanbdbd1eb2016-12-29 12:13:43 -05009861 bnxt_set_ring_params(bp);
9862 }
Michael Chane4060d32016-12-07 00:26:19 -05009863
9864 if (bp->flags & BNXT_FLAG_ROCE_CAP) {
9865 int max_cp, max_stat, max_irq;
9866
9867 /* Reserve minimum resources for RoCE */
9868 max_cp = bnxt_get_max_func_cp_rings(bp);
9869 max_stat = bnxt_get_max_func_stat_ctxs(bp);
9870 max_irq = bnxt_get_max_func_irqs(bp);
9871 if (max_cp <= BNXT_MIN_ROCE_CP_RINGS ||
9872 max_irq <= BNXT_MIN_ROCE_CP_RINGS ||
9873 max_stat <= BNXT_MIN_ROCE_STAT_CTXS)
9874 return 0;
9875
9876 max_cp -= BNXT_MIN_ROCE_CP_RINGS;
9877 max_irq -= BNXT_MIN_ROCE_CP_RINGS;
9878 max_stat -= BNXT_MIN_ROCE_STAT_CTXS;
9879 max_cp = min_t(int, max_cp, max_irq);
9880 max_cp = min_t(int, max_cp, max_stat);
9881 rc = bnxt_trim_rings(bp, max_rx, max_tx, max_cp, shared);
9882 if (rc)
9883 rc = 0;
9884 }
9885 return rc;
9886}
9887
/* In the initial default shared ring setting, each shared ring must have
 * an RX/TX ring pair.
9890 */
9891static void bnxt_trim_dflt_sh_rings(struct bnxt *bp)
9892{
9893 bp->cp_nr_rings = min_t(int, bp->tx_nr_rings_per_tc, bp->rx_nr_rings);
9894 bp->rx_nr_rings = bp->cp_nr_rings;
9895 bp->tx_nr_rings_per_tc = bp->cp_nr_rings;
9896 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
9897}
9898
Michael Chan702c2212017-05-29 19:06:10 -04009899static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh)
Michael Chan6e6c5a52016-01-02 23:45:02 -05009900{
9901 int dflt_rings, max_rx_rings, max_tx_rings, rc;
Michael Chan6e6c5a52016-01-02 23:45:02 -05009902
Michael Chan2773dfb2018-04-26 17:44:42 -04009903 if (!bnxt_can_reserve_rings(bp))
9904 return 0;
9905
Michael Chan6e6c5a52016-01-02 23:45:02 -05009906 if (sh)
9907 bp->flags |= BNXT_FLAG_SHARED_RINGS;
9908 dflt_rings = netif_get_num_default_rss_queues();
Michael Chan1d3ef132018-03-31 13:54:07 -04009909 /* Reduce default rings on multi-port cards so that total default
9910 * rings do not exceed CPU count.
9911 */
9912 if (bp->port_count > 1) {
9913 int max_rings =
9914 max_t(int, num_online_cpus() / bp->port_count, 1);
9915
9916 dflt_rings = min_t(int, dflt_rings, max_rings);
9917 }
Michael Chane4060d32016-12-07 00:26:19 -05009918 rc = bnxt_get_dflt_rings(bp, &max_rx_rings, &max_tx_rings, sh);
Michael Chan6e6c5a52016-01-02 23:45:02 -05009919 if (rc)
9920 return rc;
9921 bp->rx_nr_rings = min_t(int, dflt_rings, max_rx_rings);
9922 bp->tx_nr_rings_per_tc = min_t(int, dflt_rings, max_tx_rings);
Michael Chan58ea8012018-01-17 03:21:08 -05009923 if (sh)
9924 bnxt_trim_dflt_sh_rings(bp);
9925 else
9926 bp->cp_nr_rings = bp->tx_nr_rings_per_tc + bp->rx_nr_rings;
9927 bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
Michael Chan391be5c2016-12-29 12:13:41 -05009928
Michael Chan674f50a2018-01-17 03:21:09 -05009929 rc = __bnxt_reserve_rings(bp);
Michael Chan391be5c2016-12-29 12:13:41 -05009930 if (rc)
9931 netdev_warn(bp->dev, "Unable to reserve tx rings\n");
Michael Chan58ea8012018-01-17 03:21:08 -05009932 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9933 if (sh)
9934 bnxt_trim_dflt_sh_rings(bp);
Michael Chan391be5c2016-12-29 12:13:41 -05009935
Michael Chan674f50a2018-01-17 03:21:09 -05009936 /* Rings may have been trimmed, re-reserve the trimmed rings. */
9937 if (bnxt_need_reserve_rings(bp)) {
9938 rc = __bnxt_reserve_rings(bp);
9939 if (rc)
			netdev_warn(bp->dev, "Second ring reservation failed.\n");
9941 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9942 }
Michael Chan6e6c5a52016-01-02 23:45:02 -05009943 bp->num_stat_ctxs = bp->cp_nr_rings;
Prashant Sreedharan76595192016-07-18 07:15:22 -04009944 if (BNXT_CHIP_TYPE_NITRO_A0(bp)) {
9945 bp->rx_nr_rings++;
9946 bp->cp_nr_rings++;
9947 }
Michael Chan6e6c5a52016-01-02 23:45:02 -05009948 return rc;
Michael Chanc0c050c2015-10-22 16:01:17 -04009949}
9950
Michael Chan47558ac2018-04-26 17:44:44 -04009951static int bnxt_init_dflt_ring_mode(struct bnxt *bp)
9952{
9953 int rc;
9954
9955 if (bp->tx_nr_rings)
9956 return 0;
9957
Michael Chan6b95c3e2018-09-03 04:23:17 -04009958 bnxt_ulp_irq_stop(bp);
9959 bnxt_clear_int_mode(bp);
Michael Chan47558ac2018-04-26 17:44:44 -04009960 rc = bnxt_set_dflt_rings(bp, true);
9961 if (rc) {
9962 netdev_err(bp->dev, "Not enough rings available.\n");
Michael Chan6b95c3e2018-09-03 04:23:17 -04009963 goto init_dflt_ring_err;
Michael Chan47558ac2018-04-26 17:44:44 -04009964 }
9965 rc = bnxt_init_int_mode(bp);
9966 if (rc)
Michael Chan6b95c3e2018-09-03 04:23:17 -04009967 goto init_dflt_ring_err;
9968
Michael Chan47558ac2018-04-26 17:44:44 -04009969 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
9970 if (bnxt_rfs_supported(bp) && bnxt_rfs_capable(bp)) {
9971 bp->flags |= BNXT_FLAG_RFS;
9972 bp->dev->features |= NETIF_F_NTUPLE;
9973 }
Michael Chan6b95c3e2018-09-03 04:23:17 -04009974init_dflt_ring_err:
9975 bnxt_ulp_irq_restart(bp, rc);
9976 return rc;
Michael Chan47558ac2018-04-26 17:44:44 -04009977}
9978
Michael Chan80fcaf42018-01-17 03:21:05 -05009979int bnxt_restore_pf_fw_resources(struct bnxt *bp)
Michael Chan7b08f662016-12-07 00:26:18 -05009980{
Michael Chan80fcaf42018-01-17 03:21:05 -05009981 int rc;
9982
Michael Chan7b08f662016-12-07 00:26:18 -05009983 ASSERT_RTNL();
9984 bnxt_hwrm_func_qcaps(bp);
Venkat Duvvuru1a037782018-03-09 23:46:09 -05009985
9986 if (netif_running(bp->dev))
9987 __bnxt_close_nic(bp, true, false);
9988
Michael Chanec86f142018-03-31 13:54:21 -04009989 bnxt_ulp_irq_stop(bp);
Michael Chan80fcaf42018-01-17 03:21:05 -05009990 bnxt_clear_int_mode(bp);
9991 rc = bnxt_init_int_mode(bp);
Michael Chanec86f142018-03-31 13:54:21 -04009992 bnxt_ulp_irq_restart(bp, rc);
Venkat Duvvuru1a037782018-03-09 23:46:09 -05009993
9994 if (netif_running(bp->dev)) {
9995 if (rc)
9996 dev_close(bp->dev);
9997 else
9998 rc = bnxt_open_nic(bp, true, false);
9999 }
10000
Michael Chan80fcaf42018-01-17 03:21:05 -050010001 return rc;
Michael Chan7b08f662016-12-07 00:26:18 -050010002}
10003
Michael Chana22a6ac2017-08-23 19:34:05 -040010004static int bnxt_init_mac_addr(struct bnxt *bp)
10005{
10006 int rc = 0;
10007
10008 if (BNXT_PF(bp)) {
10009 memcpy(bp->dev->dev_addr, bp->pf.mac_addr, ETH_ALEN);
10010 } else {
10011#ifdef CONFIG_BNXT_SRIOV
10012 struct bnxt_vf_info *vf = &bp->vf;
Michael Chan28ea3342018-09-14 15:41:29 -040010013 bool strict_approval = true;
Michael Chana22a6ac2017-08-23 19:34:05 -040010014
10015 if (is_valid_ether_addr(vf->mac_addr)) {
Vasundhara Volam91cdda42018-01-17 03:21:14 -050010016 /* overwrite netdev dev_addr with admin VF MAC */
Michael Chana22a6ac2017-08-23 19:34:05 -040010017 memcpy(bp->dev->dev_addr, vf->mac_addr, ETH_ALEN);
Michael Chan28ea3342018-09-14 15:41:29 -040010018 /* Older PF driver or firmware may not approve this
10019 * correctly.
10020 */
10021 strict_approval = false;
Michael Chana22a6ac2017-08-23 19:34:05 -040010022 } else {
10023 eth_hw_addr_random(bp->dev);
Michael Chana22a6ac2017-08-23 19:34:05 -040010024 }
Michael Chan28ea3342018-09-14 15:41:29 -040010025 rc = bnxt_approve_mac(bp, bp->dev->dev_addr, strict_approval);
Michael Chana22a6ac2017-08-23 19:34:05 -040010026#endif
10027 }
10028 return rc;
10029}
10030
Michael Chanc0c050c2015-10-22 16:01:17 -040010031static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
10032{
10033 static int version_printed;
10034 struct net_device *dev;
10035 struct bnxt *bp;
Michael Chan6e6c5a52016-01-02 23:45:02 -050010036 int rc, max_irqs;
Michael Chanc0c050c2015-10-22 16:01:17 -040010037
Ray Jui4e003382017-02-20 19:25:16 -050010038 if (pci_is_bridge(pdev))
Prashant Sreedharanfa853dd2016-07-18 07:15:25 -040010039 return -ENODEV;
10040
Michael Chanc0c050c2015-10-22 16:01:17 -040010041 if (version_printed++ == 0)
10042 pr_info("%s", version);
10043
10044 max_irqs = bnxt_get_max_irq(pdev);
10045 dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
10046 if (!dev)
10047 return -ENOMEM;
10048
10049 bp = netdev_priv(dev);
Michael Chan9c1fabd2018-10-14 07:02:47 -040010050 bnxt_set_max_func_irqs(bp, max_irqs);
Michael Chanc0c050c2015-10-22 16:01:17 -040010051
10052 if (bnxt_vf_pciid(ent->driver_data))
10053 bp->flags |= BNXT_FLAG_VF;
10054
Michael Chan2bcfa6f2015-12-27 18:19:24 -050010055 if (pdev->msix_cap)
Michael Chanc0c050c2015-10-22 16:01:17 -040010056 bp->flags |= BNXT_FLAG_MSIX_CAP;
Michael Chanc0c050c2015-10-22 16:01:17 -040010057
10058 rc = bnxt_init_board(pdev, dev);
10059 if (rc < 0)
10060 goto init_err_free;
10061
10062 dev->netdev_ops = &bnxt_netdev_ops;
10063 dev->watchdog_timeo = BNXT_TX_TIMEOUT;
10064 dev->ethtool_ops = &bnxt_ethtool_ops;
David S. Millerbc880552017-07-24 21:20:16 -070010065 SWITCHDEV_SET_OPS(dev, &bnxt_switchdev_ops);
Michael Chanc0c050c2015-10-22 16:01:17 -040010066 pci_set_drvdata(pdev, dev);
10067
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -040010068 rc = bnxt_alloc_hwrm_resources(bp);
10069 if (rc)
Sathya Perla17086392017-02-20 19:25:18 -050010070 goto init_err_pci_clean;
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -040010071
10072 mutex_init(&bp->hwrm_cmd_lock);
10073 rc = bnxt_hwrm_ver_get(bp);
10074 if (rc)
Sathya Perla17086392017-02-20 19:25:18 -050010075 goto init_err_pci_clean;
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -040010076
Michael Chan1dfddc42018-10-14 07:02:39 -040010077 if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) ||
10078 bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) {
Deepak Khungare605db82017-05-29 19:06:04 -040010079 rc = bnxt_alloc_hwrm_short_cmd_req(bp);
10080 if (rc)
10081 goto init_err_pci_clean;
10082 }
10083
Michael Chane38287b2018-10-14 07:02:45 -040010084 if (BNXT_CHIP_P5(bp))
10085 bp->flags |= BNXT_FLAG_CHIP_P5;
10086
Michael Chan3c2217a2017-03-08 18:44:32 -050010087 rc = bnxt_hwrm_func_reset(bp);
10088 if (rc)
10089 goto init_err_pci_clean;
10090
Rob Swindell5ac67d82016-09-19 03:58:03 -040010091 bnxt_hwrm_fw_set_time(bp);
10092
Michael Chanc0c050c2015-10-22 16:01:17 -040010093 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10094 NETIF_F_TSO | NETIF_F_TSO6 |
10095 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
Tom Herbert7e133182016-05-18 09:06:10 -070010096 NETIF_F_GSO_IPXIP4 |
Alexander Duyck152971e2016-05-02 09:38:55 -070010097 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
10098 NETIF_F_GSO_PARTIAL | NETIF_F_RXHASH |
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -040010099 NETIF_F_RXCSUM | NETIF_F_GRO;
10100
Michael Chane38287b2018-10-14 07:02:45 -040010101 if (BNXT_SUPPORTS_TPA(bp))
Prashant Sreedharan3e8060f2016-07-18 07:15:20 -040010102 dev->hw_features |= NETIF_F_LRO;
Michael Chanc0c050c2015-10-22 16:01:17 -040010103
Michael Chanc0c050c2015-10-22 16:01:17 -040010104 dev->hw_enc_features =
10105 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
10106 NETIF_F_TSO | NETIF_F_TSO6 |
10107 NETIF_F_GSO_UDP_TUNNEL | NETIF_F_GSO_GRE |
Alexander Duyck152971e2016-05-02 09:38:55 -070010108 NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_GSO_GRE_CSUM |
Tom Herbert7e133182016-05-18 09:06:10 -070010109 NETIF_F_GSO_IPXIP4 | NETIF_F_GSO_PARTIAL;
Alexander Duyck152971e2016-05-02 09:38:55 -070010110 dev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM |
10111 NETIF_F_GSO_GRE_CSUM;
Michael Chanc0c050c2015-10-22 16:01:17 -040010112 dev->vlan_features = dev->hw_features | NETIF_F_HIGHDMA;
10113 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
10114 NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX;
Michael Chane38287b2018-10-14 07:02:45 -040010115 if (BNXT_SUPPORTS_TPA(bp))
Michael Chan1054aee2017-12-16 03:09:42 -050010116 dev->hw_features |= NETIF_F_GRO_HW;
Michael Chanc0c050c2015-10-22 16:01:17 -040010117 dev->features |= dev->hw_features | NETIF_F_HIGHDMA;
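	/* Hardware GRO and LRO are mutually exclusive receive aggregation
	 * modes; when both have ended up set, NETIF_F_GRO_HW wins.
	 */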
Michael Chan1054aee2017-12-16 03:09:42 -050010118 if (dev->features & NETIF_F_GRO_HW)
10119 dev->features &= ~NETIF_F_LRO;
Michael Chanc0c050c2015-10-22 16:01:17 -040010120 dev->priv_flags |= IFF_UNICAST_FLT;
10121
10122#ifdef CONFIG_BNXT_SRIOV
10123 init_waitqueue_head(&bp->sriov_cfg_wait);
Sathya Perla4ab0c6a2017-07-24 12:34:27 -040010124 mutex_init(&bp->sriov_lock);
Michael Chanc0c050c2015-10-22 16:01:17 -040010125#endif
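	/* On TPA-capable chips, gro_func points at the per-chip helper that
	 * rebuilds the headers of a hardware-aggregated packet before it is
	 * fed to GRO.
	 */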
Michael Chane38287b2018-10-14 07:02:45 -040010126 if (BNXT_SUPPORTS_TPA(bp)) {
10127 bp->gro_func = bnxt_gro_func_5730x;
10128 if (BNXT_CHIP_P4(bp))
10129 bp->gro_func = bnxt_gro_func_5731x;
10130 }
10131 if (!BNXT_CHIP_P4_PLUS(bp))
Michael Chan434c9752017-05-29 19:06:08 -040010132 bp->flags |= BNXT_FLAG_DOUBLE_DB;
Michael Chan309369c2016-06-13 02:25:34 -040010133
Michael Chanc0c050c2015-10-22 16:01:17 -040010134 rc = bnxt_hwrm_func_drv_rgtr(bp);
10135 if (rc)
Sathya Perla17086392017-02-20 19:25:18 -050010136 goto init_err_pci_clean;
Michael Chanc0c050c2015-10-22 16:01:17 -040010137
Michael Chana1653b12016-12-07 00:26:20 -050010138 rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
10139 if (rc)
Sathya Perla17086392017-02-20 19:25:18 -050010140 goto init_err_pci_clean;
Michael Chana1653b12016-12-07 00:26:20 -050010141
Michael Chana588e452016-12-07 00:26:21 -050010142 bp->ulp_probe = bnxt_ulp_probe;
10143
Michael Chan98f04cf2018-10-14 07:02:43 -040010144 rc = bnxt_hwrm_queue_qportcfg(bp);
10145 if (rc) {
10146 netdev_err(bp->dev, "hwrm query qportcfg failure rc: %x\n",
10147 rc);
10148 rc = -1;
10149 goto init_err_pci_clean;
10150 }
Michael Chanc0c050c2015-10-22 16:01:17 -040010151 /* Get the MAX capabilities for this function */
10152 rc = bnxt_hwrm_func_qcaps(bp);
10153 if (rc) {
10154 netdev_err(bp->dev, "hwrm query capability failure rc: %x\n",
10155 rc);
10156 rc = -1;
Sathya Perla17086392017-02-20 19:25:18 -050010157 goto init_err_pci_clean;
Michael Chanc0c050c2015-10-22 16:01:17 -040010158 }
Michael Chana22a6ac2017-08-23 19:34:05 -040010159 rc = bnxt_init_mac_addr(bp);
10160 if (rc) {
10161 dev_err(&pdev->dev, "Unable to initialize mac address.\n");
10162 rc = -EADDRNOTAVAIL;
10163 goto init_err_pci_clean;
10164 }
Michael Chanc0c050c2015-10-22 16:01:17 -040010165
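	/* One-shot capability queries; return values are ignored here since
	 * the features they gate (VNIC options, LEDs, DCB) are optional.
	 */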
Satish Baddipadige567b2ab2016-06-13 02:25:31 -040010166 bnxt_hwrm_func_qcfg(bp);
Michael Chan6ba99032018-11-15 03:25:37 -050010167 bnxt_hwrm_vnic_qcaps(bp);
Michael Chan5ad2cbe2017-01-13 01:32:03 -050010168 bnxt_hwrm_port_led_qcaps(bp);
Michael Chaneb513652017-04-04 18:14:12 -040010169 bnxt_ethtool_init(bp);
Michael Chan87fe6032017-05-16 16:39:43 -040010170 bnxt_dcb_init(bp);
Satish Baddipadige567b2ab2016-06-13 02:25:31 -040010171
Michael Chan7eb9bb32017-10-26 11:51:25 -040010172 /* MTU range: 60 - FW defined max */
10173 dev->min_mtu = ETH_ZLEN;
10174 dev->max_mtu = bp->max_mtu;
10175
Michael Chand5430d32017-08-28 13:40:31 -040010176 rc = bnxt_probe_phy(bp);
10177 if (rc)
10178 goto init_err_pci_clean;
10179
Michael Chanc61fb992017-02-06 16:55:36 -050010180 bnxt_set_rx_skb_mode(bp, false);
Michael Chanc0c050c2015-10-22 16:01:17 -040010181 bnxt_set_tpa_flags(bp);
10182 bnxt_set_ring_params(bp);
Michael Chan702c2212017-05-29 19:06:10 -040010183 rc = bnxt_set_dflt_rings(bp, true);
Michael Chanbdbd1eb2016-12-29 12:13:43 -050010184 if (rc) {
10185 netdev_err(bp->dev, "Not enough rings available.\n");
10186 rc = -ENOMEM;
Sathya Perla17086392017-02-20 19:25:18 -050010187 goto init_err_pci_clean;
Michael Chanbdbd1eb2016-12-29 12:13:43 -050010188 }
Michael Chanc0c050c2015-10-22 16:01:17 -040010189
Michael Chan87da7f72016-11-16 21:13:09 -050010190 /* Default RSS hash cfg. */
10191 bp->rss_hash_cfg = VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4 |
10192 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4 |
10193 VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6 |
10194 VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
Michael Chane38287b2018-10-14 07:02:45 -040010195 if (BNXT_CHIP_P4(bp) && bp->hwrm_spec_code >= 0x10501) {
Michael Chan87da7f72016-11-16 21:13:09 -050010196 bp->flags |= BNXT_FLAG_UDP_RSS_CAP;
10197 bp->rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4 |
10198 VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
10199 }
10200
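	/* bnxt_rfs_supported() says whether the hardware can do accelerated
	 * RFS at all (so NTUPLE is advertised), while bnxt_rfs_capable()
	 * checks that enough resources are free right now to enable it.
	 */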
Michael Chan8079e8f2016-12-29 12:13:37 -050010201 if (bnxt_rfs_supported(bp)) {
Michael Chan2bcfa6f2015-12-27 18:19:24 -050010202 dev->hw_features |= NETIF_F_NTUPLE;
10203 if (bnxt_rfs_capable(bp)) {
10204 bp->flags |= BNXT_FLAG_RFS;
10205 dev->features |= NETIF_F_NTUPLE;
10206 }
10207 }
10208
Michael Chanc0c050c2015-10-22 16:01:17 -040010209 if (dev->hw_features & NETIF_F_HW_VLAN_CTAG_RX)
10210 bp->flags |= BNXT_FLAG_STRIP_VLAN;
10211
Michael Chan78095922016-12-07 00:26:16 -050010212 rc = bnxt_init_int_mode(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -040010213 if (rc)
Sathya Perla17086392017-02-20 19:25:18 -050010214 goto init_err_pci_clean;
Michael Chanc0c050c2015-10-22 16:01:17 -040010215
Michael Chan832aed12018-03-09 23:46:07 -050010216 /* No TC has been set yet and rings may have been trimmed due to
10217 * limited MSIX, so we re-initialize the TX rings per TC.
10218 */
10219 bp->tx_nr_rings_per_tc = bp->tx_nr_rings;
10220
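	/* Mirror the firmware's WoL setting into the PM core so userspace
	 * wakeup policy matches; bnxt_shutdown() below also arms
	 * wake-from-D3 based on bp->wol.
	 */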
Michael Chanc1ef1462017-04-04 18:14:07 -040010221 bnxt_get_wol_settings(bp);
Michael Chand196ece2017-04-04 18:14:08 -040010222 if (bp->flags & BNXT_FLAG_WOL_CAP)
10223 device_set_wakeup_enable(&pdev->dev, bp->wol);
10224 else
10225 device_set_wakeup_capable(&pdev->dev, false);
Michael Chanc1ef1462017-04-04 18:14:07 -040010226
Michael Chanc3480a62018-01-17 03:21:15 -050010227 bnxt_hwrm_set_cache_line_size(bp, cache_line_size());
10228
Michael Chan74706af2018-10-14 07:02:40 -040010229 bnxt_hwrm_coal_params_qcaps(bp);
10230
Michael Chanc213eae2017-10-13 21:09:29 -040010231 if (BNXT_PF(bp)) {
10232 if (!bnxt_pf_wq) {
10233 bnxt_pf_wq =
10234 create_singlethread_workqueue("bnxt_pf_wq");
10235 if (!bnxt_pf_wq) {
10236 dev_err(&pdev->dev, "Unable to create workqueue.\n");
 rc = -ENOMEM;
10237 goto init_err_pci_clean;
10238 }
10239 }
Sathya Perla2ae74082017-08-28 13:40:33 -040010240 bnxt_init_tc(bp);
Michael Chanc213eae2017-10-13 21:09:29 -040010241 }
Sathya Perla2ae74082017-08-28 13:40:33 -040010242
Michael Chan78095922016-12-07 00:26:16 -050010243 rc = register_netdev(dev);
10244 if (rc)
Sathya Perla2ae74082017-08-28 13:40:33 -040010245 goto init_err_cleanup_tc;
Michael Chan78095922016-12-07 00:26:16 -050010246
Sathya Perla4ab0c6a2017-07-24 12:34:27 -040010247 if (BNXT_PF(bp))
10248 bnxt_dl_register(bp);
10249
Michael Chanc0c050c2015-10-22 16:01:17 -040010250 netdev_info(dev, "%s found at mem %lx, node addr %pM\n",
10251 board_info[ent->driver_data].name,
10252 (long)pci_resource_start(pdev, 0), dev->dev_addr);
Bjorn Helgaasaf125b72018-03-30 14:09:54 -050010253 pcie_print_link_status(pdev);
Ajit Khaparde90c4f782016-05-15 03:04:45 -040010254
Michael Chanc0c050c2015-10-22 16:01:17 -040010255 return 0;
10256
Sathya Perla2ae74082017-08-28 13:40:33 -040010257init_err_cleanup_tc:
10258 bnxt_shutdown_tc(bp);
Michael Chan78095922016-12-07 00:26:16 -050010259 bnxt_clear_int_mode(bp);
10260
Sathya Perla17086392017-02-20 19:25:18 -050010261init_err_pci_clean:
Venkat Duvvurua2bf74f2018-10-05 00:26:02 -040010262 bnxt_free_hwrm_resources(bp);
Michael Chan98f04cf2018-10-14 07:02:43 -040010263 bnxt_free_ctx_mem(bp);
10264 kfree(bp->ctx);
10265 bp->ctx = NULL;
Sathya Perla17086392017-02-20 19:25:18 -050010266 bnxt_cleanup_pci(bp);
Michael Chanc0c050c2015-10-22 16:01:17 -040010267
10268init_err_free:
10269 free_netdev(dev);
10270 return rc;
10271}
10272
Michael Chand196ece2017-04-04 18:14:08 -040010273static void bnxt_shutdown(struct pci_dev *pdev)
10274{
10275 struct net_device *dev = pci_get_drvdata(pdev);
10276 struct bnxt *bp;
10277
10278 if (!dev)
10279 return;
10280
10281 rtnl_lock();
10282 bp = netdev_priv(dev);
10283 if (!bp)
10284 goto shutdown_exit;
10285
10286 if (netif_running(dev))
10287 dev_close(dev);
10288
Ray Juia7f3f932017-12-01 03:13:02 -050010289 bnxt_ulp_shutdown(bp);
10290
Michael Chand196ece2017-04-04 18:14:08 -040010291 if (system_state == SYSTEM_POWER_OFF) {
10292 bnxt_clear_int_mode(bp);
10293 pci_wake_from_d3(pdev, bp->wol);
10294 pci_set_power_state(pdev, PCI_D3hot);
10295 }
10296
10297shutdown_exit:
10298 rtnl_unlock();
10299}
10300
Michael Chanf65a2042017-04-04 18:14:11 -040010301#ifdef CONFIG_PM_SLEEP
10302static int bnxt_suspend(struct device *device)
10303{
10304 struct pci_dev *pdev = to_pci_dev(device);
10305 struct net_device *dev = pci_get_drvdata(pdev);
10306 struct bnxt *bp = netdev_priv(dev);
10307 int rc = 0;
10308
10309 rtnl_lock();
10310 if (netif_running(dev)) {
10311 netif_device_detach(dev);
10312 rc = bnxt_close(dev);
10313 }
10314 bnxt_hwrm_func_drv_unrgtr(bp);
10315 rtnl_unlock();
10316 return rc;
10317}
10318
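/* Suspend deliberately unregistered this driver from the firmware, so resume
 * must redo the whole handshake: query the firmware version, re-register,
 * then reset the function before the netdev can be reopened.
 */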
10319static int bnxt_resume(struct device *device)
10320{
10321 struct pci_dev *pdev = to_pci_dev(device);
10322 struct net_device *dev = pci_get_drvdata(pdev);
10323 struct bnxt *bp = netdev_priv(dev);
10324 int rc = 0;
10325
10326 rtnl_lock();
10327 if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
10328 rc = -ENODEV;
10329 goto resume_exit;
10330 }
10331 rc = bnxt_hwrm_func_reset(bp);
10332 if (rc) {
10333 rc = -EBUSY;
10334 goto resume_exit;
10335 }
10336 bnxt_get_wol_settings(bp);
10337 if (netif_running(dev)) {
10338 rc = bnxt_open(dev);
10339 if (!rc)
10340 netif_device_attach(dev);
10341 }
10342
10343resume_exit:
10344 rtnl_unlock();
10345 return rc;
10346}
10347
10348static SIMPLE_DEV_PM_OPS(bnxt_pm_ops, bnxt_suspend, bnxt_resume);
10349#define BNXT_PM_OPS (&bnxt_pm_ops)
10350
10351#else
10352
10353#define BNXT_PM_OPS NULL
10354
10355#endif /* CONFIG_PM_SLEEP */
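
/* A rough sketch of how the PM core drives the hooks above around a system
 * sleep transition (BNXT_PM_OPS is wired into bnxt_pci_driver.driver.pm
 * below, and collapses to NULL without CONFIG_PM_SLEEP):
 *
 *	bnxt_suspend(&pdev->dev);	close the netdev, unregister from FW
 *	(system sleeps, then wakes)
 *	bnxt_resume(&pdev->dev);	re-register with FW, reset, reopen
 */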
10356
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010357/**
10358 * bnxt_io_error_detected - called when PCI error is detected
10359 * @pdev: Pointer to PCI device
10360 * @state: The current pci connection state
10361 *
10362 * This function is called after a PCI bus error affecting
10363 * this device has been detected.
10364 */
10365static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev,
10366 pci_channel_state_t state)
10367{
10368 struct net_device *netdev = pci_get_drvdata(pdev);
Michael Chana588e452016-12-07 00:26:21 -050010369 struct bnxt *bp = netdev_priv(netdev);
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010370
10371 netdev_info(netdev, "PCI I/O error detected\n");
10372
10373 rtnl_lock();
10374 netif_device_detach(netdev);
10375
Michael Chana588e452016-12-07 00:26:21 -050010376 bnxt_ulp_stop(bp);
10377
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010378 if (state == pci_channel_io_perm_failure) {
10379 rtnl_unlock();
10380 return PCI_ERS_RESULT_DISCONNECT;
10381 }
10382
10383 if (netif_running(netdev))
10384 bnxt_close(netdev);
10385
10386 pci_disable_device(pdev);
10387 rtnl_unlock();
10388
10389 /* Request a slot reset. */
10390 return PCI_ERS_RESULT_NEED_RESET;
10391}
10392
10393/**
10394 * bnxt_io_slot_reset - called after the pci bus has been reset.
10395 * @pdev: Pointer to PCI device
10396 *
10397 * Restart the card from scratch, as if from a cold-boot.
10398 * At this point, the card has experienced a hard reset,
10399 * followed by fixups by BIOS, and has its config space
10400 * set up identically to what it was at cold boot.
10401 */
10402static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev)
10403{
10404 struct net_device *netdev = pci_get_drvdata(pdev);
10405 struct bnxt *bp = netdev_priv(netdev);
10406 int err = 0;
10407 pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT;
10408
10409 netdev_info(bp->dev, "PCI Slot Reset\n");
10410
10411 rtnl_lock();
10412
10413 if (pci_enable_device(pdev)) {
10414 dev_err(&pdev->dev,
10415 "Cannot re-enable PCI device after reset.\n");
10416 } else {
10417 pci_set_master(pdev);
10418
Michael Chanaa8ed022016-12-07 00:26:17 -050010419 err = bnxt_hwrm_func_reset(bp);
10420 if (!err && netif_running(netdev))
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010421 err = bnxt_open(netdev);
10422
Michael Chana588e452016-12-07 00:26:21 -050010423 if (!err) {
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010424 result = PCI_ERS_RESULT_RECOVERED;
Michael Chana588e452016-12-07 00:26:21 -050010425 bnxt_ulp_start(bp);
10426 }
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010427 }
10428
10429 if (result != PCI_ERS_RESULT_RECOVERED && netif_running(netdev))
10430 dev_close(netdev);
10431
10432 rtnl_unlock();
10433
10434 return result;
10435}
10436
10437/**
10438 * bnxt_io_resume - called when traffic can start flowing again.
10439 * @pdev: Pointer to PCI device
10440 *
10441 * This callback is called when the error recovery driver tells
10442 * us that it's OK to resume normal operation.
10443 */
10444static void bnxt_io_resume(struct pci_dev *pdev)
10445{
10446 struct net_device *netdev = pci_get_drvdata(pdev);
10447
10448 rtnl_lock();
10449
10450 netif_device_attach(netdev);
10451
10452 rtnl_unlock();
10453}
10454
10455static const struct pci_error_handlers bnxt_err_handler = {
10456 .error_detected = bnxt_io_error_detected,
10457 .slot_reset = bnxt_io_slot_reset,
10458 .resume = bnxt_io_resume
10459};
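
/* A rough sketch of how the PCI error recovery core walks these hooks (see
 * the kernel's PCI error recovery documentation):
 *
 *	if (bnxt_io_error_detected(pdev, state) == PCI_ERS_RESULT_NEED_RESET) {
 *		... reset the slot/link ...
 *		if (bnxt_io_slot_reset(pdev) == PCI_ERS_RESULT_RECOVERED)
 *			bnxt_io_resume(pdev);
 *	}
 */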
10460
Michael Chanc0c050c2015-10-22 16:01:17 -040010461static struct pci_driver bnxt_pci_driver = {
10462 .name = DRV_MODULE_NAME,
10463 .id_table = bnxt_pci_tbl,
10464 .probe = bnxt_init_one,
10465 .remove = bnxt_remove_one,
Michael Chand196ece2017-04-04 18:14:08 -040010466 .shutdown = bnxt_shutdown,
Michael Chanf65a2042017-04-04 18:14:11 -040010467 .driver.pm = BNXT_PM_OPS,
Satish Baddipadige6316ea62016-03-07 15:38:48 -050010468 .err_handler = &bnxt_err_handler,
Michael Chanc0c050c2015-10-22 16:01:17 -040010469#if defined(CONFIG_BNXT_SRIOV)
10470 .sriov_configure = bnxt_sriov_configure,
10471#endif
10472};
10473
Michael Chanc213eae2017-10-13 21:09:29 -040010474static int __init bnxt_init(void)
10475{
Andy Gospodarekcabfb092018-04-26 17:44:40 -040010476 bnxt_debug_init();
Michael Chanc213eae2017-10-13 21:09:29 -040010477 return pci_register_driver(&bnxt_pci_driver);
10478}
10479
10480static void __exit bnxt_exit(void)
10481{
10482 pci_unregister_driver(&bnxt_pci_driver);
10483 if (bnxt_pf_wq)
10484 destroy_workqueue(bnxt_pf_wq);
Andy Gospodarekcabfb092018-04-26 17:44:40 -040010485 bnxt_debug_exit();
Michael Chanc213eae2017-10-13 21:09:29 -040010486}
10487
10488module_init(bnxt_init);
10489module_exit(bnxt_exit);