// SPDX-License-Identifier: GPL-2.0-only
/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014-2019 Broadcom
 */

#define pr_fmt(fmt)				"bcmgenet: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/string.h>
#include <linux/if_ether.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/of_platform.h>
#include <net/arp.h>

#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/phy.h>
#include <linux/platform_data/bcmgenet.h>

#include <asm/unaligned.h>

#include "bcmgenet.h"

/* Maximum number of hardware queues, downsized if needed */
#define GENET_MAX_MQ_CNT	4

/* Default highest priority queue for multi queue support */
#define GENET_Q0_PRIORITY	0

#define GENET_Q16_RX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->rx_queues * priv->hw_params->rx_bds_per_q)
#define GENET_Q16_TX_BD_CNT	\
	(TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->tx_bds_per_q)

#define RX_BUF_LENGTH		2048
#define SKB_ALIGNMENT		32

/* Tx/Rx DMA register offset, skip 256 descriptors */
#define WORDS_PER_BD(p)		(p->hw_params->words_per_bd)
#define DMA_DESC_SIZE		(WORDS_PER_BD(priv) * sizeof(u32))

#define GENET_TDMA_REG_OFF	(priv->hw_params->tdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

#define GENET_RDMA_REG_OFF	(priv->hw_params->rdma_offset + \
				TOTAL_DESC * DMA_DESC_SIZE)

static inline void bcmgenet_writel(u32 value, void __iomem *offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(value, offset);
	else
		writel_relaxed(value, offset);
}

static inline u32 bcmgenet_readl(void __iomem *offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(offset);
	else
		return readl_relaxed(offset);
}

static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
					     void __iomem *d, u32 value)
{
	bcmgenet_writel(value, d + DMA_DESC_LENGTH_STATUS);
}

static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
				    void __iomem *d,
				    dma_addr_t addr)
{
	bcmgenet_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);

	/* Register writes to the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so save these
	 * expensive writes unless the platform is explicitly configured
	 * for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		bcmgenet_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
#endif
}

/* Combined address + length/status setter */
static inline void dmadesc_set(struct bcmgenet_priv *priv,
			       void __iomem *d, dma_addr_t addr, u32 val)
{
	dmadesc_set_addr(priv, d, addr);
	dmadesc_set_length_status(priv, d, val);
}

static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
					  void __iomem *d)
{
	dma_addr_t addr;

	addr = bcmgenet_readl(d + DMA_DESC_ADDRESS_LO);

	/* Register reads from the GISB bus can take a couple hundred
	 * nanoseconds and are done for each packet, so skip these
	 * expensive reads unless the platform is explicitly configured
	 * for 64-bits/LPAE.
	 */
#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		addr |= (u64)bcmgenet_readl(d + DMA_DESC_ADDRESS_HI) << 32;
#endif
	return addr;
}

#define GENET_VER_FMT	"%1d.%1d EPHY: 0x%04x"

#define GENET_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				NETIF_MSG_LINK)

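/* The RBUF flush control register lives in the RBUF block on GENET v1
 * and moved to the SYS block on later revisions.
 */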
static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
	else
		return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
}

static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
	else
		bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
}

/* These helpers deal with the register map change between GENET 1.1
 * and GENET 2. Only the registers currently being used by the driver
 * are defined.
 */
static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_CTRL);
}

static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv))
		return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
	else
		return bcmgenet_readl(priv->base +
				      priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
{
	if (GENET_IS_V1(priv))
		bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
	else
		bcmgenet_writel(val, priv->base +
				priv->hw_params->tbuf_offset + TBUF_BP_MC);
}

/* RX/TX DMA register accessors */
enum dma_reg {
	DMA_RING_CFG = 0,
	DMA_CTRL,
	DMA_STATUS,
	DMA_SCB_BURST_SIZE,
	DMA_ARB_CTRL,
	DMA_PRIORITY_0,
	DMA_PRIORITY_1,
	DMA_PRIORITY_2,
	DMA_INDEX2RING_0,
	DMA_INDEX2RING_1,
	DMA_INDEX2RING_2,
	DMA_INDEX2RING_3,
	DMA_INDEX2RING_4,
	DMA_INDEX2RING_5,
	DMA_INDEX2RING_6,
	DMA_INDEX2RING_7,
	DMA_RING0_TIMEOUT,
	DMA_RING1_TIMEOUT,
	DMA_RING2_TIMEOUT,
	DMA_RING3_TIMEOUT,
	DMA_RING4_TIMEOUT,
	DMA_RING5_TIMEOUT,
	DMA_RING6_TIMEOUT,
	DMA_RING7_TIMEOUT,
	DMA_RING8_TIMEOUT,
	DMA_RING9_TIMEOUT,
	DMA_RING10_TIMEOUT,
	DMA_RING11_TIMEOUT,
	DMA_RING12_TIMEOUT,
	DMA_RING13_TIMEOUT,
	DMA_RING14_TIMEOUT,
	DMA_RING15_TIMEOUT,
	DMA_RING16_TIMEOUT,
};

static const u8 bcmgenet_dma_regs_v3plus[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x2C,
	[DMA_PRIORITY_0]	= 0x30,
	[DMA_PRIORITY_1]	= 0x34,
	[DMA_PRIORITY_2]	= 0x38,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
	[DMA_INDEX2RING_0]	= 0x70,
	[DMA_INDEX2RING_1]	= 0x74,
	[DMA_INDEX2RING_2]	= 0x78,
	[DMA_INDEX2RING_3]	= 0x7C,
	[DMA_INDEX2RING_4]	= 0x80,
	[DMA_INDEX2RING_5]	= 0x84,
	[DMA_INDEX2RING_6]	= 0x88,
	[DMA_INDEX2RING_7]	= 0x8C,
};

static const u8 bcmgenet_dma_regs_v2[] = {
	[DMA_RING_CFG]		= 0x00,
	[DMA_CTRL]		= 0x04,
	[DMA_STATUS]		= 0x08,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

static const u8 bcmgenet_dma_regs_v1[] = {
	[DMA_CTRL]		= 0x00,
	[DMA_STATUS]		= 0x04,
	[DMA_SCB_BURST_SIZE]	= 0x0C,
	[DMA_ARB_CTRL]		= 0x30,
	[DMA_PRIORITY_0]	= 0x34,
	[DMA_PRIORITY_1]	= 0x38,
	[DMA_PRIORITY_2]	= 0x3C,
	[DMA_RING0_TIMEOUT]	= 0x2C,
	[DMA_RING1_TIMEOUT]	= 0x30,
	[DMA_RING2_TIMEOUT]	= 0x34,
	[DMA_RING3_TIMEOUT]	= 0x38,
	[DMA_RING4_TIMEOUT]	= 0x3c,
	[DMA_RING5_TIMEOUT]	= 0x40,
	[DMA_RING6_TIMEOUT]	= 0x44,
	[DMA_RING7_TIMEOUT]	= 0x48,
	[DMA_RING8_TIMEOUT]	= 0x4c,
	[DMA_RING9_TIMEOUT]	= 0x50,
	[DMA_RING10_TIMEOUT]	= 0x54,
	[DMA_RING11_TIMEOUT]	= 0x58,
	[DMA_RING12_TIMEOUT]	= 0x5c,
	[DMA_RING13_TIMEOUT]	= 0x60,
	[DMA_RING14_TIMEOUT]	= 0x64,
	[DMA_RING15_TIMEOUT]	= 0x68,
	[DMA_RING16_TIMEOUT]	= 0x6C,
};

/* Set at runtime once bcmgenet version is known */
static const u8 *bcmgenet_dma_regs;

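/* Map a generic struct device back to the driver private structure */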
static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
{
	return netdev_priv(dev_get_drvdata(dev));
}

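/* TDMA/RDMA register accessors: the per-ring register blocks occupy the
 * first DMA_RINGS_SIZE bytes of each DMA window, and the common DMA
 * registers follow them.
 */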
static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
				      enum dma_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
					u32 val, enum dma_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
}

/* RDMA/TDMA ring registers and accessors
 * We merge the common fields and just prefix with T/D the registers
 * having a different meaning depending on the direction.
 */
enum dma_ring_reg {
	TDMA_READ_PTR = 0,
	RDMA_WRITE_PTR = TDMA_READ_PTR,
	TDMA_READ_PTR_HI,
	RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
	TDMA_CONS_INDEX,
	RDMA_PROD_INDEX = TDMA_CONS_INDEX,
	TDMA_PROD_INDEX,
	RDMA_CONS_INDEX = TDMA_PROD_INDEX,
	DMA_RING_BUF_SIZE,
	DMA_START_ADDR,
	DMA_START_ADDR_HI,
	DMA_END_ADDR,
	DMA_END_ADDR_HI,
	DMA_MBUF_DONE_THRESH,
	TDMA_FLOW_PERIOD,
	RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
	TDMA_WRITE_PTR,
	RDMA_READ_PTR = TDMA_WRITE_PTR,
	TDMA_WRITE_PTR_HI,
	RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
};

/* GENET v4 supports 40-bit pointer addressing; for obvious reasons the
 * LO and HI word parts are contiguous, but this shifts the offsets of
 * the other registers.
 */
static const u8 genet_dma_ring_regs_v4[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_READ_PTR_HI]	= 0x04,
	[TDMA_CONS_INDEX]	= 0x08,
	[TDMA_PROD_INDEX]	= 0x0C,
	[DMA_RING_BUF_SIZE]	= 0x10,
	[DMA_START_ADDR]	= 0x14,
	[DMA_START_ADDR_HI]	= 0x18,
	[DMA_END_ADDR]		= 0x1C,
	[DMA_END_ADDR_HI]	= 0x20,
	[DMA_MBUF_DONE_THRESH]	= 0x24,
	[TDMA_FLOW_PERIOD]	= 0x28,
	[TDMA_WRITE_PTR]	= 0x2C,
	[TDMA_WRITE_PTR_HI]	= 0x30,
};

static const u8 genet_dma_ring_regs_v123[] = {
	[TDMA_READ_PTR]		= 0x00,
	[TDMA_CONS_INDEX]	= 0x04,
	[TDMA_PROD_INDEX]	= 0x08,
	[DMA_RING_BUF_SIZE]	= 0x0C,
	[DMA_START_ADDR]	= 0x10,
	[DMA_END_ADDR]		= 0x14,
	[DMA_MBUF_DONE_THRESH]	= 0x18,
	[TDMA_FLOW_PERIOD]	= 0x1C,
	[TDMA_WRITE_PTR]	= 0x20,
};

/* Set at runtime once GENET version is known */
static const u8 *genet_dma_ring_regs;

static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_TDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_TDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
					   unsigned int ring,
					   enum dma_ring_reg r)
{
	return bcmgenet_readl(priv->base + GENET_RDMA_REG_OFF +
			      (DMA_RING_SIZE * ring) +
			      genet_dma_ring_regs[r]);
}

static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
					     unsigned int ring, u32 val,
					     enum dma_ring_reg r)
{
	bcmgenet_writel(val, priv->base + GENET_RDMA_REG_OFF +
			(DMA_RING_SIZE * ring) +
			genet_dma_ring_regs[r]);
}

static int bcmgenet_begin(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn on the clock */
	return clk_prepare_enable(priv->clk);
}

static void bcmgenet_complete(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Turn off the clock */
	clk_disable_unprepare(priv->clk);
}

static int bcmgenet_get_link_ksettings(struct net_device *dev,
				       struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	phy_ethtool_ksettings_get(dev->phydev, cmd);

	return 0;
}

static int bcmgenet_set_link_ksettings(struct net_device *dev,
				       const struct ethtool_link_ksettings *cmd)
{
	if (!netif_running(dev))
		return -EINVAL;

	if (!dev->phydev)
		return -ENODEV;

	return phy_ethtool_ksettings_set(dev->phydev, cmd);
}

static int bcmgenet_set_features(struct net_device *dev,
				 netdev_features_t features)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 reg;
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* Make sure we reflect the value of CRC_CMD_FWD */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);

	clk_disable_unprepare(priv->clk);

	return ret;
}

static u32 bcmgenet_get_msglevel(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	return priv->msg_enable;
}

static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	priv->msg_enable = level;
}

static int bcmgenet_get_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct bcmgenet_rx_ring *ring;
	unsigned int i;

	ec->tx_max_coalesced_frames =
		bcmgenet_tdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_max_coalesced_frames =
		bcmgenet_rdma_ring_readl(priv, DESC_INDEX,
					 DMA_MBUF_DONE_THRESH);
	ec->rx_coalesce_usecs =
		bcmgenet_rdma_readl(priv, DMA_RING16_TIMEOUT) * 8192 / 1000;

	for (i = 0; i < priv->hw_params->rx_queues; i++) {
		ring = &priv->rx_rings[i];
		ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;
	}
	ring = &priv->rx_rings[DESC_INDEX];
	ec->use_adaptive_rx_coalesce |= ring->dim.use_dim;

	return 0;
}

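/* Program the RX ring's MBDONE packet threshold and convert the usecs
 * timeout into the hardware's 8.192 us timer ticks.
 */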
static void bcmgenet_set_rx_coalesce(struct bcmgenet_rx_ring *ring,
				     u32 usecs, u32 pkts)
{
	struct bcmgenet_priv *priv = ring->priv;
	unsigned int i = ring->index;
	u32 reg;

	bcmgenet_rdma_ring_writel(priv, i, pkts, DMA_MBUF_DONE_THRESH);

	reg = bcmgenet_rdma_readl(priv, DMA_RING0_TIMEOUT + i);
	reg &= ~DMA_TIMEOUT_MASK;
	reg |= DIV_ROUND_UP(usecs * 1000, 8192);
	bcmgenet_rdma_writel(priv, reg, DMA_RING0_TIMEOUT + i);
}

static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring,
					  struct ethtool_coalesce *ec)
{
	struct dim_cq_moder moder;
	u32 usecs, pkts;

	ring->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	ring->rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
	usecs = ring->rx_coalesce_usecs;
	pkts = ring->rx_max_coalesced_frames;

	if (ec->use_adaptive_rx_coalesce && !ring->dim.use_dim) {
		moder = net_dim_get_def_rx_moderation(ring->dim.dim.mode);
		usecs = moder.usec;
		pkts = moder.pkts;
	}

	ring->dim.use_dim = ec->use_adaptive_rx_coalesce;
	bcmgenet_set_rx_coalesce(ring, usecs, pkts);
}

static int bcmgenet_set_coalesce(struct net_device *dev,
				 struct ethtool_coalesce *ec)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int i;

	/* Base system clock is 125 MHz; the DMA timeout is this reference
	 * clock divided by 1024, which yields a granularity of roughly
	 * 8.192 us. Our maximum value has to fit in the DMA_TIMEOUT_MASK
	 * (16 bits).
	 */
	if (ec->tx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->tx_max_coalesced_frames == 0 ||
	    ec->rx_max_coalesced_frames > DMA_INTR_THRESHOLD_MASK ||
	    ec->rx_coalesce_usecs > (DMA_TIMEOUT_MASK * 8) + 1)
		return -EINVAL;

	if (ec->rx_coalesce_usecs == 0 && ec->rx_max_coalesced_frames == 0)
		return -EINVAL;

	/* GENET TDMA hardware does not support a configurable timeout, but
	 * will always generate an interrupt either after MBDONE packets have
	 * been transmitted, or when the ring is empty.
	 */
	if (ec->tx_coalesce_usecs || ec->tx_coalesce_usecs_high ||
	    ec->tx_coalesce_usecs_irq || ec->tx_coalesce_usecs_low ||
	    ec->use_adaptive_tx_coalesce)
		return -EOPNOTSUPP;

	/* Program all TX queues with the same values, as there is no
	 * ethtool knob to do coalescing on a per-queue basis
	 */
	for (i = 0; i < priv->hw_params->tx_queues; i++)
		bcmgenet_tdma_ring_writel(priv, i,
					  ec->tx_max_coalesced_frames,
					  DMA_MBUF_DONE_THRESH);
	bcmgenet_tdma_ring_writel(priv, DESC_INDEX,
				  ec->tx_max_coalesced_frames,
				  DMA_MBUF_DONE_THRESH);

	for (i = 0; i < priv->hw_params->rx_queues; i++)
		bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[i], ec);
	bcmgenet_set_ring_rx_coalesce(&priv->rx_rings[DESC_INDEX], ec);

	return 0;
}

/* standard ethtool support functions. */
enum bcmgenet_stat_type {
	BCMGENET_STAT_NETDEV = -1,
	BCMGENET_STAT_MIB_RX,
	BCMGENET_STAT_MIB_TX,
	BCMGENET_STAT_RUNT,
	BCMGENET_STAT_MISC,
	BCMGENET_STAT_SOFT,
};

struct bcmgenet_stats {
	char stat_string[ETH_GSTRING_LEN];
	int stat_sizeof;
	int stat_offset;
	enum bcmgenet_stat_type type;
	/* reg offset from UMAC base for misc counters */
	u16 reg_offset;
};

#define STAT_NETDEV(m) { \
	.stat_string = __stringify(m), \
	.stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
	.stat_offset = offsetof(struct net_device_stats, m), \
	.type = BCMGENET_STAT_NETDEV, \
}

#define STAT_GENET_MIB(str, m, _type) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = _type, \
}

#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
#define STAT_GENET_SOFT_MIB(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_SOFT)

#define STAT_GENET_MISC(str, m, offset) { \
	.stat_string = str, \
	.stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
	.stat_offset = offsetof(struct bcmgenet_priv, m), \
	.type = BCMGENET_STAT_MISC, \
	.reg_offset = offset, \
}

#define STAT_GENET_Q(num) \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_packets", \
			tx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("txq" __stringify(num) "_bytes", \
			tx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_bytes", \
			rx_rings[num].bytes), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_packets", \
			rx_rings[num].packets), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_errors", \
			rx_rings[num].errors), \
	STAT_GENET_SOFT_MIB("rxq" __stringify(num) "_dropped", \
			rx_rings[num].dropped)

/* There is a 0xC gap between the end of RX and beginning of TX stats and then
 * between the end of TX stats and the beginning of the RX RUNT
 */
#define BCMGENET_STAT_OFFSET	0xc

/* Hardware counters must be kept in sync because the order/offset
 * is important here (order in structure declaration = order in hardware)
 */
static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
	/* general stats */
	STAT_NETDEV(rx_packets),
	STAT_NETDEV(tx_packets),
	STAT_NETDEV(rx_bytes),
	STAT_NETDEV(tx_bytes),
	STAT_NETDEV(rx_errors),
	STAT_NETDEV(tx_errors),
	STAT_NETDEV(rx_dropped),
	STAT_NETDEV(tx_dropped),
	STAT_NETDEV(multicast),
	/* UniMAC RSV counters */
	STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
	STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
	STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
	STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
	STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
	STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
	STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
	STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
	STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
	STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
	STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
	STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
	STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
	STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
	STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
	STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
	STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
	STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
	STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
	/* UniMAC TSV counters */
	STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
	STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
	STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
	STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
	STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
	STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
	STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
	STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
	STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
	STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
	STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
	STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
	STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
	STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
	STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
	STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
	STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
	STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
	STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
	STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
	STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
	STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
	STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
	STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
	STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
	STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
	STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
	STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
	STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
	/* UniMAC RUNT counters */
	STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
	STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
	STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
	STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
	/* Misc UniMAC counters */
	STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
			UMAC_RBUF_OVFL_CNT_V1),
	STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
			UMAC_RBUF_ERR_CNT_V1),
	STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
	STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
	STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_dma_failed", mib.tx_dma_failed),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb", mib.tx_realloc_tsb),
	STAT_GENET_SOFT_MIB("tx_realloc_tsb_failed",
			    mib.tx_realloc_tsb_failed),
	/* Per TX queues */
	STAT_GENET_Q(0),
	STAT_GENET_Q(1),
	STAT_GENET_Q(2),
	STAT_GENET_Q(3),
	STAT_GENET_Q(16),
};

#define BCMGENET_STATS_LEN	ARRAY_SIZE(bcmgenet_gstrings_stats)

static void bcmgenet_get_drvinfo(struct net_device *dev,
				 struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
	strlcpy(info->version, "v2.0", sizeof(info->version));
}

static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return BCMGENET_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}

static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
				 u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < BCMGENET_STATS_LEN; i++) {
			memcpy(data + i * ETH_GSTRING_LEN,
			       bcmgenet_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
		}
		break;
	}
}

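/* Read one of the miscellaneous UniMAC counters, remapping the V1
 * register offset for later GENET revisions and clearing the hardware
 * counter if it has overflowed.
 */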
static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
	u16 new_offset;
	u32 val;

	switch (offset) {
	case UMAC_RBUF_OVFL_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_OVFL_CNT_V2;
		else
			new_offset = RBUF_OVFL_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	case UMAC_RBUF_ERR_CNT_V1:
		if (GENET_IS_V2(priv))
			new_offset = RBUF_ERR_CNT_V2;
		else
			new_offset = RBUF_ERR_CNT_V3PLUS;

		val = bcmgenet_rbuf_readl(priv, new_offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_rbuf_writel(priv, 0, new_offset);
		break;
	default:
		val = bcmgenet_umac_readl(priv, offset);
		/* clear if overflowed */
		if (val == ~0)
			bcmgenet_umac_writel(priv, 0, offset);
		break;
	}

	return val;
}

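/* Refresh the software copies of the hardware counters. The MIB_TX and
 * RUNT register blocks each sit BCMGENET_STAT_OFFSET bytes beyond the
 * block before them, hence the cumulative offset adjustments in the
 * fall-through cases below.
 */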
static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
	int i, j = 0;

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		u8 offset = 0;
		u32 val = 0;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		switch (s->type) {
		case BCMGENET_STAT_NETDEV:
		case BCMGENET_STAT_SOFT:
			continue;
		case BCMGENET_STAT_RUNT:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_TX:
			offset += BCMGENET_STAT_OFFSET;
			/* fall through */
		case BCMGENET_STAT_MIB_RX:
			val = bcmgenet_umac_readl(priv,
						  UMAC_MIB_START + j + offset);
			offset = 0;	/* Reset Offset */
			break;
		case BCMGENET_STAT_MISC:
			if (GENET_IS_V1(priv)) {
				val = bcmgenet_umac_readl(priv, s->reg_offset);
				/* clear if overflowed */
				if (val == ~0)
					bcmgenet_umac_writel(priv, 0,
							     s->reg_offset);
			} else {
				val = bcmgenet_update_stat_misc(priv,
								s->reg_offset);
			}
			break;
		}

		j += s->stat_sizeof;
		p = (char *)priv + s->stat_offset;
		*(u32 *)p = val;
	}
}

static void bcmgenet_get_ethtool_stats(struct net_device *dev,
				       struct ethtool_stats *stats,
				       u64 *data)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int i;

	if (netif_running(dev))
		bcmgenet_update_mib_counters(priv);

	for (i = 0; i < BCMGENET_STATS_LEN; i++) {
		const struct bcmgenet_stats *s;
		char *p;

		s = &bcmgenet_gstrings_stats[i];
		if (s->type == BCMGENET_STAT_NETDEV)
			p = (char *)&dev->stats;
		else
			p = (char *)priv;
		p += s->stat_offset;
		if (sizeof(unsigned long) != sizeof(u32) &&
		    s->stat_sizeof == sizeof(unsigned long))
			data[i] = *(unsigned long *)p;
		else
			data[i] = *(u32 *)p;
	}
}

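/* Enable or disable EEE in the UMAC, TBUF and RBUF blocks, gating the
 * dedicated EEE clock along with it.
 */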
static void bcmgenet_eee_enable_set(struct net_device *dev, bool enable)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 off = priv->hw_params->tbuf_offset + TBUF_ENERGY_CTRL;
	u32 reg;

	if (enable && !priv->clk_eee_enabled) {
		clk_prepare_enable(priv->clk_eee);
		priv->clk_eee_enabled = true;
	}

	reg = bcmgenet_umac_readl(priv, UMAC_EEE_CTRL);
	if (enable)
		reg |= EEE_EN;
	else
		reg &= ~EEE_EN;
	bcmgenet_umac_writel(priv, reg, UMAC_EEE_CTRL);

	/* Enable EEE and switch to a 27 MHz clock automatically */
	reg = bcmgenet_readl(priv->base + off);
	if (enable)
		reg |= TBUF_EEE_EN | TBUF_PM_EN;
	else
		reg &= ~(TBUF_EEE_EN | TBUF_PM_EN);
	bcmgenet_writel(reg, priv->base + off);

	/* Do the same thing for the RBUF */
	reg = bcmgenet_rbuf_readl(priv, RBUF_ENERGY_CTRL);
	if (enable)
		reg |= RBUF_EEE_EN | RBUF_PM_EN;
	else
		reg &= ~(RBUF_EEE_EN | RBUF_PM_EN);
	bcmgenet_rbuf_writel(priv, reg, RBUF_ENERGY_CTRL);

	if (!enable && priv->clk_eee_enabled) {
		clk_disable_unprepare(priv->clk_eee);
		priv->clk_eee_enabled = false;
	}

	priv->eee.eee_enabled = enable;
	priv->eee.eee_active = enable;
}

static int bcmgenet_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	e->eee_enabled = p->eee_enabled;
	e->eee_active = p->eee_active;
	e->tx_lpi_timer = bcmgenet_umac_readl(priv, UMAC_EEE_LPI_TIMER);

	return phy_ethtool_get_eee(dev->phydev, e);
}

static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct ethtool_eee *p = &priv->eee;
	int ret = 0;

	if (GENET_IS_V1(priv))
		return -EOPNOTSUPP;

	if (!dev->phydev)
		return -ENODEV;

	p->eee_enabled = e->eee_enabled;

	if (!p->eee_enabled) {
		bcmgenet_eee_enable_set(dev, false);
	} else {
		ret = phy_init_eee(dev->phydev, 0);
		if (ret) {
			netif_err(priv, hw, dev, "EEE initialization failed\n");
			return ret;
		}

		bcmgenet_umac_writel(priv, e->tx_lpi_timer, UMAC_EEE_LPI_TIMER);
		bcmgenet_eee_enable_set(dev, true);
	}

	return phy_ethtool_set_eee(dev->phydev, e);
}

/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
	.begin			= bcmgenet_begin,
	.complete		= bcmgenet_complete,
	.get_strings		= bcmgenet_get_strings,
	.get_sset_count		= bcmgenet_get_sset_count,
	.get_ethtool_stats	= bcmgenet_get_ethtool_stats,
	.get_drvinfo		= bcmgenet_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_msglevel		= bcmgenet_get_msglevel,
	.set_msglevel		= bcmgenet_set_msglevel,
	.get_wol		= bcmgenet_get_wol,
	.set_wol		= bcmgenet_set_wol,
	.get_eee		= bcmgenet_get_eee,
	.set_eee		= bcmgenet_set_eee,
	.nway_reset		= phy_ethtool_nway_reset,
	.get_coalesce		= bcmgenet_get_coalesce,
	.set_coalesce		= bcmgenet_set_coalesce,
	.get_link_ksettings	= bcmgenet_get_link_ksettings,
	.set_link_ksettings	= bcmgenet_set_link_ksettings,
	.get_ts_info		= ethtool_op_get_ts_info,
};

/* Power down the unimac, based on mode. */
static int bcmgenet_power_down(struct bcmgenet_priv *priv,
			       enum bcmgenet_power_mode mode)
{
	int ret = 0;
	u32 reg;

	switch (mode) {
	case GENET_POWER_CABLE_SENSE:
		phy_detach(priv->dev->phydev);
		break;

	case GENET_POWER_WOL_MAGIC:
		ret = bcmgenet_wol_power_down_cfg(priv, mode);
		break;

	case GENET_POWER_PASSIVE:
		/* Power down LED */
		if (priv->hw_params->flags & GENET_HAS_EXT) {
			reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
			if (GENET_IS_V5(priv))
				reg |= EXT_PWR_DOWN_PHY_EN |
				       EXT_PWR_DOWN_PHY_RD |
				       EXT_PWR_DOWN_PHY_SD |
				       EXT_PWR_DOWN_PHY_RX |
				       EXT_PWR_DOWN_PHY_TX |
				       EXT_IDDQ_GLBL_PWR;
			else
				reg |= EXT_PWR_DOWN_PHY;

			reg |= (EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);

			bcmgenet_phy_power_set(priv->dev, false);
		}
		break;
	default:
		break;
	}

	return ret;
}

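/* Power up the unimac based on mode; the WOL magic mode is handled
 * entirely by bcmgenet_wol_power_up_cfg().
 */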
static void bcmgenet_power_up(struct bcmgenet_priv *priv,
			      enum bcmgenet_power_mode mode)
{
	u32 reg;

	if (!(priv->hw_params->flags & GENET_HAS_EXT))
		return;

	reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);

	switch (mode) {
	case GENET_POWER_PASSIVE:
		reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
		if (GENET_IS_V5(priv)) {
			reg &= ~(EXT_PWR_DOWN_PHY_EN |
				 EXT_PWR_DOWN_PHY_RD |
				 EXT_PWR_DOWN_PHY_SD |
				 EXT_PWR_DOWN_PHY_RX |
				 EXT_PWR_DOWN_PHY_TX |
				 EXT_IDDQ_GLBL_PWR);
			reg |= EXT_PHY_RESET;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
			mdelay(1);

			reg &= ~EXT_PHY_RESET;
		} else {
			reg &= ~EXT_PWR_DOWN_PHY;
			reg |= EXT_PWR_DN_EN_LD;
		}
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		bcmgenet_phy_power_set(priv->dev, true);
		break;

	case GENET_POWER_CABLE_SENSE:
		/* enable APD */
		if (!GENET_IS_V5(priv)) {
			reg |= EXT_PWR_DN_EN_LD;
			bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
		}
		break;
	case GENET_POWER_WOL_MAGIC:
		bcmgenet_wol_power_up_cfg(priv, mode);
		return;
	default:
		break;
	}
}

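/* Claim the next free transmit control block and advance the ring's
 * local write pointer, wrapping back to cb_ptr at end_ptr.
 */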
static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Advancing local write pointer */
	if (ring->write_ptr == ring->end_ptr)
		ring->write_ptr = ring->cb_ptr;
	else
		ring->write_ptr++;

	return tx_cb_ptr;
}

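/* Give back the most recently claimed control block by rewinding the
 * ring's local write pointer.
 */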
static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv,
					 struct bcmgenet_tx_ring *ring)
{
	struct enet_cb *tx_cb_ptr;

	tx_cb_ptr = ring->cbs;
	tx_cb_ptr += ring->write_ptr - ring->cb_ptr;

	/* Rewinding local write pointer */
	if (ring->write_ptr == ring->cb_ptr)
		ring->write_ptr = ring->end_ptr;
	else
		ring->write_ptr--;

	return tx_cb_ptr;
}

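/* Per-ring interrupt masking: ring 16 is signalled through the INTRL2_0
 * controller while the priority rings each own one bit in INTRL2_1.
 */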
static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring16_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_RXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_rx_ring_int_disable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_rx_ring_int_enable(struct bcmgenet_rx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv,
				 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index),
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_SET);
}

static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_0_writel(ring->priv, UMAC_IRQ_TXDMA_DONE,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_CLEAR);
}

static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring)
{
	bcmgenet_intrl2_1_writel(ring->priv, 1 << ring->index,
				 INTRL2_CPU_MASK_SET);
}

/* Simple helper to free a transmit control block's resources
 * Returns an skb when the last transmit control block associated with the
 * skb is freed. The skb should be freed by the caller if necessary.
 */
static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;

	if (skb) {
		cb->skb = NULL;
		if (cb == GENET_CB(skb)->first_cb)
			dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
					 dma_unmap_len(cb, dma_len),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr),
				       dma_unmap_len(cb, dma_len),
				       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);

		if (cb == GENET_CB(skb)->last_cb)
			return skb;

	} else if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_page(dev,
			       dma_unmap_addr(cb, dma_addr),
			       dma_unmap_len(cb, dma_len),
			       DMA_TO_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return NULL;
}

/* Simple helper to free a receive control block's resources */
static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev,
					   struct enet_cb *cb)
{
	struct sk_buff *skb;

	skb = cb->skb;
	cb->skb = NULL;

	if (dma_unmap_addr(cb, dma_addr)) {
		dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr),
				 dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE);
		dma_unmap_addr_set(cb, dma_addr, 0);
	}

	return skb;
}

/* Unlocked version of the reclaim routine */
static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
					  struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned int txbds_processed = 0;
	unsigned int bytes_compl = 0;
	unsigned int pkts_compl = 0;
	unsigned int txbds_ready;
	unsigned int c_index;
	struct sk_buff *skb;

	/* Clear status before servicing to reduce spurious interrupts */
	if (ring->index == DESC_INDEX)
		bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_TXDMA_DONE,
					 INTRL2_CPU_CLEAR);
	else
		bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
					 INTRL2_CPU_CLEAR);

	/* Compute how many buffers are transmitted since last xmit call */
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX)
		& DMA_C_INDEX_MASK;
	txbds_ready = (c_index - ring->c_index) & DMA_C_INDEX_MASK;

	netif_dbg(priv, tx_done, dev,
		  "%s ring=%d old_c_index=%u c_index=%u txbds_ready=%u\n",
		  __func__, ring->index, ring->c_index, c_index, txbds_ready);

	/* Reclaim transmitted buffers */
	while (txbds_processed < txbds_ready) {
		skb = bcmgenet_free_tx_cb(&priv->pdev->dev,
					  &priv->tx_cbs[ring->clean_ptr]);
		if (skb) {
			pkts_compl++;
			bytes_compl += GENET_CB(skb)->bytes_sent;
			dev_consume_skb_any(skb);
		}

		txbds_processed++;
		if (likely(ring->clean_ptr < ring->end_ptr))
			ring->clean_ptr++;
		else
			ring->clean_ptr = ring->cb_ptr;
	}

	ring->free_bds += txbds_processed;
	ring->c_index = c_index;

	ring->packets += pkts_compl;
	ring->bytes += bytes_compl;

	netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
				  pkts_compl, bytes_compl);

	return txbds_processed;
}

static unsigned int bcmgenet_tx_reclaim(struct net_device *dev,
					struct bcmgenet_tx_ring *ring)
{
	unsigned int released;

	spin_lock_bh(&ring->lock);
	released = __bcmgenet_tx_reclaim(dev, ring);
	spin_unlock_bh(&ring->lock);

	return released;
}

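/* NAPI poll handler for TX completion: reclaim finished descriptors,
 * wake the queue once enough descriptors are free, and re-enable the
 * ring interrupt when there is no more work.
 */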
1373static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
1374{
1375 struct bcmgenet_tx_ring *ring =
1376 container_of(napi, struct bcmgenet_tx_ring, napi);
1377 unsigned int work_done = 0;
Doug Berger6d22fe12017-03-09 16:58:50 -08001378 struct netdev_queue *txq;
Jaedon Shin4092e6a2015-02-28 11:48:26 +09001379
Doug Bergerb0447ec2017-10-25 15:04:17 -07001380 spin_lock(&ring->lock);
Doug Berger6d22fe12017-03-09 16:58:50 -08001381 work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
1382 if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
1383 txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
1384 netif_tx_wake_queue(txq);
1385 }
Doug Bergerb0447ec2017-10-25 15:04:17 -07001386 spin_unlock(&ring->lock);
Jaedon Shin4092e6a2015-02-28 11:48:26 +09001387
1388 if (work_done == 0) {
1389 napi_complete(napi);
Petri Gynther9dbac282015-03-25 12:35:10 -07001390 ring->int_enable(ring);
Jaedon Shin4092e6a2015-02-28 11:48:26 +09001391
1392 return 0;
1393 }
1394
1395 return budget;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001396}
1397
1398static void bcmgenet_tx_reclaim_all(struct net_device *dev)
1399{
1400 struct bcmgenet_priv *priv = netdev_priv(dev);
1401 int i;
1402
1403 if (netif_is_multiqueue(dev)) {
1404 for (i = 0; i < priv->hw_params->tx_queues; i++)
1405 bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
1406 }
1407
1408 bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
1409}
1410
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001411/* Reallocate the SKB to put enough headroom in front of it and insert
1412 * the transmit checksum offsets in the descriptors
1413 */
Doug Berger9a9ba2a2020-03-17 17:05:36 -07001414static struct sk_buff *bcmgenet_add_tsb(struct net_device *dev,
1415 struct sk_buff *skb)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001416{
Doug Bergerf1af17c2019-12-17 16:51:15 -08001417 struct bcmgenet_priv *priv = netdev_priv(dev);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001418 struct status_64 *status = NULL;
1419 struct sk_buff *new_skb;
1420 u16 offset;
1421 u8 ip_proto;
Florian Fainelli6f894212018-04-02 15:58:55 -07001422 __be16 ip_ver;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001423 u32 tx_csum_info;
1424
1425 if (unlikely(skb_headroom(skb) < sizeof(*status))) {
1426 /* If the 64-byte status block is enabled, we must make sure the
1427 * skb has enough headroom for us to insert it.
1428 */
1429 new_skb = skb_realloc_headroom(skb, sizeof(*status));
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001430 if (!new_skb) {
Doug Bergere3fa8582019-12-17 16:51:14 -08001431 dev_kfree_skb_any(skb);
Doug Bergerf1af17c2019-12-17 16:51:15 -08001432 priv->mib.tx_realloc_tsb_failed++;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001433 dev->stats.tx_dropped++;
Petri Gyntherbc233332014-10-01 11:30:01 -07001434 return NULL;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001435 }
Doug Bergere3fa8582019-12-17 16:51:14 -08001436 dev_consume_skb_any(skb);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001437 skb = new_skb;
Doug Bergerf1af17c2019-12-17 16:51:15 -08001438 priv->mib.tx_realloc_tsb++;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001439 }
1440
1441 skb_push(skb, sizeof(*status));
1442 status = (struct status_64 *)skb->data;
1443
1444 if (skb->ip_summed == CHECKSUM_PARTIAL) {
Florian Fainelli6f894212018-04-02 15:58:55 -07001445 ip_ver = skb->protocol;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001446 switch (ip_ver) {
Florian Fainelli6f894212018-04-02 15:58:55 -07001447 case htons(ETH_P_IP):
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001448 ip_proto = ip_hdr(skb)->protocol;
1449 break;
Florian Fainelli6f894212018-04-02 15:58:55 -07001450 case htons(ETH_P_IPV6):
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001451 ip_proto = ipv6_hdr(skb)->nexthdr;
1452 break;
1453 default:
Doug Bergerdd8e9112019-12-17 16:51:09 -08001454 /* don't use UDP flag */
1455 ip_proto = 0;
1456 break;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001457 }
1458
1459 offset = skb_checksum_start_offset(skb) - sizeof(*status);
1460 tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
Doug Bergerdd8e9112019-12-17 16:51:09 -08001461 (offset + skb->csum_offset) |
1462 STATUS_TX_CSUM_LV;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001463
Doug Bergerdd8e9112019-12-17 16:51:09 -08001464 /* Set the special UDP flag for UDP */
1465 if (ip_proto == IPPROTO_UDP)
1466 tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001467
1468 status->tx_csum_info = tx_csum_info;
1469 }
1470
Petri Gyntherbc233332014-10-01 11:30:01 -07001471 return skb;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001472}
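
/* Worked example (illustration only; this helper is hypothetical and is
 * not called anywhere in the driver): for UDP over IPv4 behind a plain
 * 14-byte Ethernet header, skb_checksum_start_offset() points at the UDP
 * header, so after subtracting the TSB size the packed word produced by
 * bcmgenet_add_tsb() comes out as sketched below.  The constant 6 is the
 * offset of the checksum field within the UDP header.
 */
static inline u32 example_udp4_tx_csum_info(void)
{
	u16 start = ETH_HLEN + sizeof(struct iphdr);	/* 14 + 20 = 34 */
	u16 store = start + 6;				/* 34 + 6  = 40 */

	return (start << STATUS_TX_CSUM_START_SHIFT) | store |
	       STATUS_TX_CSUM_LV | STATUS_TX_CSUM_PROTO_UDP;
}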
1473
1474static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
1475{
1476 struct bcmgenet_priv *priv = netdev_priv(dev);
Doug Berger876dbad2017-07-14 16:12:09 -07001477 struct device *kdev = &priv->pdev->dev;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001478 struct bcmgenet_tx_ring *ring = NULL;
Doug Berger876dbad2017-07-14 16:12:09 -07001479 struct enet_cb *tx_cb_ptr;
Florian Fainellib2cde2c2014-03-20 10:53:23 -07001480 struct netdev_queue *txq;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001481 int nr_frags, index;
Doug Berger876dbad2017-07-14 16:12:09 -07001482 dma_addr_t mapping;
1483 unsigned int size;
1484 skb_frag_t *frag;
1485 u32 len_stat;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001486 int ret;
1487 int i;
1488
1489 index = skb_get_queue_mapping(skb);
1490 /* Mapping strategy:
1491 * queue_mapping = 0, unclassified, packet transmitted through ring 16
1492 * queue_mapping = 1, goes to ring 0 (highest priority queue).
1493 * queue_mapping = 2, goes to ring 1.
1494 * queue_mapping = 3, goes to ring 2.
1495 * queue_mapping = 4, goes to ring 3.
1496 */
1497 if (index == 0)
1498 index = DESC_INDEX;
1499 else
1500 index -= 1;
1501
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001502 ring = &priv->tx_rings[index];
Florian Fainellib2cde2c2014-03-20 10:53:23 -07001503 txq = netdev_get_tx_queue(dev, ring->queue);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001504
Petri Gyntherf5a9ec22016-04-05 13:59:59 -07001505 nr_frags = skb_shinfo(skb)->nr_frags;
1506
Doug Bergerb0447ec2017-10-25 15:04:17 -07001507 spin_lock(&ring->lock);
Petri Gyntherf5a9ec22016-04-05 13:59:59 -07001508 if (ring->free_bds <= (nr_frags + 1)) {
1509 if (!netif_tx_queue_stopped(txq)) {
1510 netif_tx_stop_queue(txq);
1511 netdev_err(dev,
1512 "%s: tx ring %d full when queue %d awake\n",
1513 __func__, index, ring->queue);
1514 }
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001515 ret = NETDEV_TX_BUSY;
1516 goto out;
1517 }
1518
Florian Fainelli474ea9c2014-07-22 11:01:52 -07001519 if (skb_padto(skb, ETH_ZLEN)) {
1520 ret = NETDEV_TX_OK;
1521 goto out;
1522 }
1523
Petri Gynther55868122016-03-24 11:27:20 -07001524 /* Retain how many bytes will be sent on the wire, without the TSB
1525 * inserted by the transmit checksum offload
1526 */
1527 GENET_CB(skb)->bytes_sent = skb->len;
1528
Doug Berger9a9ba2a2020-03-17 17:05:36 -07001529 /* add the Transmit Status Block */
1530 skb = bcmgenet_add_tsb(dev, skb);
1531 if (!skb) {
1532 ret = NETDEV_TX_OK;
1533 goto out;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001534 }
1535
Doug Berger876dbad2017-07-14 16:12:09 -07001536 for (i = 0; i <= nr_frags; i++) {
1537 tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001538
Gustavo A. R. Silva4fa112f2017-10-26 07:16:01 -05001539 BUG_ON(!tx_cb_ptr);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001540
Doug Berger876dbad2017-07-14 16:12:09 -07001541 if (!i) {
1542 /* Transmit single SKB or head of fragment list */
Doug Bergerf48bed12017-07-14 16:12:10 -07001543 GENET_CB(skb)->first_cb = tx_cb_ptr;
Doug Berger876dbad2017-07-14 16:12:09 -07001544 size = skb_headlen(skb);
1545 mapping = dma_map_single(kdev, skb->data, size,
1546 DMA_TO_DEVICE);
1547 } else {
1548 /* xmit fragment */
Doug Berger876dbad2017-07-14 16:12:09 -07001549 frag = &skb_shinfo(skb)->frags[i - 1];
1550 size = skb_frag_size(frag);
1551 mapping = skb_frag_dma_map(kdev, frag, 0, size,
1552 DMA_TO_DEVICE);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001553 }
Doug Berger876dbad2017-07-14 16:12:09 -07001554
1555 ret = dma_mapping_error(kdev, mapping);
1556 if (ret) {
1557 priv->mib.tx_dma_failed++;
1558 netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
1559 ret = NETDEV_TX_OK;
1560 goto out_unmap_frags;
1561 }
1562 dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
1563 dma_unmap_len_set(tx_cb_ptr, dma_len, size);
1564
Doug Bergerf48bed12017-07-14 16:12:10 -07001565 tx_cb_ptr->skb = skb;
1566
Doug Berger876dbad2017-07-14 16:12:09 -07001567 len_stat = (size << DMA_BUFLENGTH_SHIFT) |
1568 (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT);
1569
1570 if (!i) {
1571 len_stat |= DMA_TX_APPEND_CRC | DMA_SOP;
1572 if (skb->ip_summed == CHECKSUM_PARTIAL)
1573 len_stat |= DMA_TX_DO_CSUM;
1574 }
1575 if (i == nr_frags)
1576 len_stat |= DMA_EOP;
1577
1578 dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001579 }
1580
Doug Bergerf48bed12017-07-14 16:12:10 -07001581 GENET_CB(skb)->last_cb = tx_cb_ptr;
Florian Fainellid03825f2014-03-20 10:53:21 -07001582 skb_tx_timestamp(skb);
1583
Florian Fainelliae67bf02015-03-13 12:11:06 -07001584 /* Decrement total BD count and advance our write pointer */
1585 ring->free_bds -= nr_frags + 1;
1586 ring->prod_index += nr_frags + 1;
1587 ring->prod_index &= DMA_P_INDEX_MASK;
1588
Petri Gynthere178c8c2016-04-09 00:20:36 -07001589 netdev_tx_sent_queue(txq, GENET_CB(skb)->bytes_sent);
1590
Jaedon Shin4092e6a2015-02-28 11:48:26 +09001591 if (ring->free_bds <= (MAX_SKB_FRAGS + 1))
Florian Fainellib2cde2c2014-03-20 10:53:23 -07001592 netif_tx_stop_queue(txq);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001593
Florian Westphal6b16f9e2019-04-01 16:42:14 +02001594 if (!netdev_xmit_more() || netif_xmit_stopped(txq))
Florian Fainelliddd0ca52015-03-13 12:11:07 -07001595 /* Packets are ready, update producer index */
1596 bcmgenet_tdma_ring_writel(priv, ring->index,
1597 ring->prod_index, TDMA_PROD_INDEX);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001598out:
Doug Bergerb0447ec2017-10-25 15:04:17 -07001599 spin_unlock(&ring->lock);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001600
1601 return ret;
Doug Berger876dbad2017-07-14 16:12:09 -07001602
1603out_unmap_frags:
1604 /* Back up for failed control block mapping */
1605 bcmgenet_put_txcb(priv, ring);
1606
1607 /* Unmap successfully mapped control blocks */
1608 while (i-- > 0) {
1609 tx_cb_ptr = bcmgenet_put_txcb(priv, ring);
Doug Bergerf48bed12017-07-14 16:12:10 -07001610 bcmgenet_free_tx_cb(kdev, tx_cb_ptr);
Doug Berger876dbad2017-07-14 16:12:09 -07001611 }
1612
1613 dev_kfree_skb(skb);
1614 goto out;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001615}
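
/* Illustration (not part of the original sources): for an skb carrying
 * two fragments, the descriptor loop above emits three descriptors whose
 * flag bits differ only at the ends of the chain.  With checksum offload
 * requested, roughly:
 *
 *   i == 0: DMA_SOP | DMA_TX_APPEND_CRC | DMA_TX_DO_CSUM
 *   i == 1: length/QTAG bits only
 *   i == 2: DMA_EOP
 */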
1616
Petri Gyntherd6707be2015-03-12 15:48:00 -07001617static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv,
1618 struct enet_cb *cb)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001619{
1620 struct device *kdev = &priv->pdev->dev;
1621 struct sk_buff *skb;
Petri Gyntherd6707be2015-03-12 15:48:00 -07001622 struct sk_buff *rx_skb;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001623 dma_addr_t mapping;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001624
Petri Gyntherd6707be2015-03-12 15:48:00 -07001625 /* Allocate a new Rx skb */
Florian Fainellic91b7f62014-07-23 10:42:12 -07001626 skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
Petri Gyntherd6707be2015-03-12 15:48:00 -07001627 if (!skb) {
1628 priv->mib.alloc_rx_buff_failed++;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001629 netif_err(priv, rx_err, priv->dev,
Petri Gyntherd6707be2015-03-12 15:48:00 -07001630 "%s: Rx skb allocation failed\n", __func__);
1631 return NULL;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001632 }
1633
Petri Gyntherd6707be2015-03-12 15:48:00 -07001634 /* DMA-map the new Rx skb */
1635 mapping = dma_map_single(kdev, skb->data, priv->rx_buf_len,
1636 DMA_FROM_DEVICE);
1637 if (dma_mapping_error(kdev, mapping)) {
1638 priv->mib.rx_dma_failed++;
1639 dev_kfree_skb_any(skb);
1640 netif_err(priv, rx_err, priv->dev,
1641 "%s: Rx skb DMA mapping failed\n", __func__);
1642 return NULL;
1643 }
1644
1645 /* Grab the current Rx skb from the ring and DMA-unmap it */
Doug Bergerf48bed12017-07-14 16:12:10 -07001646 rx_skb = bcmgenet_free_rx_cb(kdev, cb);
Petri Gyntherd6707be2015-03-12 15:48:00 -07001647
1648 /* Put the new Rx skb on the ring */
1649 cb->skb = skb;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001650 dma_unmap_addr_set(cb, dma_addr, mapping);
Doug Bergerf48bed12017-07-14 16:12:10 -07001651 dma_unmap_len_set(cb, dma_len, priv->rx_buf_len);
Petri Gynther8ac467e2015-03-09 13:40:00 -07001652 dmadesc_set_addr(priv, cb->bd_addr, mapping);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001653
Petri Gyntherd6707be2015-03-12 15:48:00 -07001654 /* Return the current Rx skb to caller */
1655 return rx_skb;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001656}
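
/* Note (illustrative): the refill-first ordering above keeps the ring
 * fully populated at all times.  A replacement buffer is allocated and
 * DMA-mapped before the filled one is taken off the ring, and if the
 * allocation fails the old buffer simply stays in place while the caller
 * drops the packet it holds.
 */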
1657
1658/* bcmgenet_desc_rx - descriptor-based Rx processing.
1659 * This may be called from the bottom half or from the NAPI polling method.
1660 */
Petri Gynther4055eae2015-03-25 12:35:16 -07001661static unsigned int bcmgenet_desc_rx(struct bcmgenet_rx_ring *ring,
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001662 unsigned int budget)
1663{
Petri Gynther4055eae2015-03-25 12:35:16 -07001664 struct bcmgenet_priv *priv = ring->priv;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001665 struct net_device *dev = priv->dev;
1666 struct enet_cb *cb;
1667 struct sk_buff *skb;
1668 u32 dma_length_status;
1669 unsigned long dma_flag;
Petri Gyntherd6707be2015-03-12 15:48:00 -07001670 int len;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001671 unsigned int rxpktprocessed = 0, rxpkttoprocess;
Florian Fainelli9f4ca052018-03-22 18:19:33 -07001672 unsigned int bytes_processed = 0;
Doug Bergerd5810ca2017-03-13 17:41:37 -07001673 unsigned int p_index, mask;
Petri Gyntherd26ea6c2015-03-10 15:55:00 -07001674 unsigned int discards;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001675
Doug Bergerd5810ca2017-03-13 17:41:37 -07001676 /* Clear status before servicing to reduce spurious interrupts */
1677 if (ring->index == DESC_INDEX) {
1678 bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_DONE,
1679 INTRL2_CPU_CLEAR);
1680 } else {
1681 mask = 1 << (UMAC_IRQ1_RX_INTR_SHIFT + ring->index);
1682 bcmgenet_intrl2_1_writel(priv,
1683 mask,
1684 INTRL2_CPU_CLEAR);
1685 }
1686
Petri Gynther4055eae2015-03-25 12:35:16 -07001687 p_index = bcmgenet_rdma_ring_readl(priv, ring->index, RDMA_PROD_INDEX);
Petri Gyntherd26ea6c2015-03-10 15:55:00 -07001688
1689 discards = (p_index >> DMA_P_INDEX_DISCARD_CNT_SHIFT) &
1690 DMA_P_INDEX_DISCARD_CNT_MASK;
1691 if (discards > ring->old_discards) {
1692 discards = discards - ring->old_discards;
Florian Fainelli37a30b42017-03-16 10:27:08 -07001693 ring->errors += discards;
Petri Gyntherd26ea6c2015-03-10 15:55:00 -07001694 ring->old_discards += discards;
1695
1696 /* Clear HW register when we reach 75% of maximum 0xFFFF */
1697 if (ring->old_discards >= 0xC000) {
1698 ring->old_discards = 0;
Petri Gynther4055eae2015-03-25 12:35:16 -07001699 bcmgenet_rdma_ring_writel(priv, ring->index, 0,
Petri Gyntherd26ea6c2015-03-10 15:55:00 -07001700 RDMA_PROD_INDEX);
1701 }
1702 }
1703
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001704 p_index &= DMA_P_INDEX_MASK;
Doug Bergerc298ede2017-03-13 17:41:33 -07001705 rxpkttoprocess = (p_index - ring->c_index) & DMA_C_INDEX_MASK;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001706
1707 netif_dbg(priv, rx_status, dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001708 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001709
1710 while ((rxpktprocessed < rxpkttoprocess) &&
Florian Fainellic91b7f62014-07-23 10:42:12 -07001711 (rxpktprocessed < budget)) {
Doug Berger9a9ba2a2020-03-17 17:05:36 -07001712 struct status_64 *status;
1713 __be16 rx_csum;
1714
Petri Gynther8ac467e2015-03-09 13:40:00 -07001715 cb = &priv->rx_cbs[ring->read_ptr];
Petri Gyntherd6707be2015-03-12 15:48:00 -07001716 skb = bcmgenet_rx_refill(priv, cb);
Florian Fainellib629be52014-09-08 11:37:52 -07001717
Florian Fainellib629be52014-09-08 11:37:52 -07001718 if (unlikely(!skb)) {
Florian Fainelli37a30b42017-03-16 10:27:08 -07001719 ring->dropped++;
Petri Gyntherd6707be2015-03-12 15:48:00 -07001720 goto next;
Florian Fainellib629be52014-09-08 11:37:52 -07001721 }
1722
Doug Berger9a9ba2a2020-03-17 17:05:36 -07001723 status = (struct status_64 *)skb->data;
1724 dma_length_status = status->length_status;
1725 if (dev->features & NETIF_F_RXCSUM) {
Doug Berger81015532019-12-17 16:51:10 -08001726 rx_csum = (__force __be16)(status->rx_csum & 0xffff);
Doug Berger9a9ba2a2020-03-17 17:05:36 -07001727 skb->csum = (__force __wsum)ntohs(rx_csum);
1728 skb->ip_summed = CHECKSUM_COMPLETE;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001729 }
1730
1731 /* DMA flags and length are still valid no matter how
1732 * we got the Receive Status Vector (64B RSB or register)
1733 */
1734 dma_flag = dma_length_status & 0xffff;
1735 len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
1736
1737 netif_dbg(priv, rx_status, dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001738 "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
Petri Gynther8ac467e2015-03-09 13:40:00 -07001739 __func__, p_index, ring->c_index,
1740 ring->read_ptr, dma_length_status);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001741
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001742 if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
1743 netif_err(priv, rx_status, dev,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001744 "dropping fragmented packet!\n");
Florian Fainelli37a30b42017-03-16 10:27:08 -07001745 ring->errors++;
Petri Gyntherd6707be2015-03-12 15:48:00 -07001746 dev_kfree_skb_any(skb);
1747 goto next;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001748 }
Petri Gyntherd6707be2015-03-12 15:48:00 -07001749
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001750 /* report errors */
1751 if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
1752 DMA_RX_OV |
1753 DMA_RX_NO |
1754 DMA_RX_LG |
1755 DMA_RX_RXER))) {
1756 netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
Florian Fainellic91b7f62014-07-23 10:42:12 -07001757 (unsigned int)dma_flag);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001758 if (dma_flag & DMA_RX_CRC_ERROR)
1759 dev->stats.rx_crc_errors++;
1760 if (dma_flag & DMA_RX_OV)
1761 dev->stats.rx_over_errors++;
1762 if (dma_flag & DMA_RX_NO)
1763 dev->stats.rx_frame_errors++;
1764 if (dma_flag & DMA_RX_LG)
1765 dev->stats.rx_length_errors++;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001766 dev->stats.rx_errors++;
Petri Gyntherd6707be2015-03-12 15:48:00 -07001767 dev_kfree_skb_any(skb);
1768 goto next;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001769 } /* error packet */
1770
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001771 skb_put(skb, len);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001772
Doug Berger9a9ba2a2020-03-17 17:05:36 -07001773 /* remove the 64B RSB and the 2 bytes the hardware added for IP alignment (64 + 2 = 66) */
1774 skb_pull(skb, 66);
1775 len -= 66;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001776
1777 if (priv->crc_fwd_en) {
1778 skb_trim(skb, len - ETH_FCS_LEN);
1779 len -= ETH_FCS_LEN;
1780 }
1781
Florian Fainelli9f4ca052018-03-22 18:19:33 -07001782 bytes_processed += len;
1783
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001784 /* Finish setting up the received SKB and send it to the kernel */
1785 skb->protocol = eth_type_trans(skb, priv->dev);
Florian Fainelli37a30b42017-03-16 10:27:08 -07001786 ring->packets++;
1787 ring->bytes += len;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001788 if (dma_flag & DMA_RX_MULT)
1789 dev->stats.multicast++;
1790
1791 /* Notify kernel */
Petri Gynther4055eae2015-03-25 12:35:16 -07001792 napi_gro_receive(&ring->napi, skb);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001793 netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
1794
Petri Gyntherd6707be2015-03-12 15:48:00 -07001795next:
Florian Fainellicf377d82014-10-10 10:51:52 -07001796 rxpktprocessed++;
Petri Gynther8ac467e2015-03-09 13:40:00 -07001797 if (likely(ring->read_ptr < ring->end_ptr))
1798 ring->read_ptr++;
1799 else
1800 ring->read_ptr = ring->cb_ptr;
1801
1802 ring->c_index = (ring->c_index + 1) & DMA_C_INDEX_MASK;
Petri Gynther4055eae2015-03-25 12:35:16 -07001803 bcmgenet_rdma_ring_writel(priv, ring->index, ring->c_index, RDMA_CONS_INDEX);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001804 }
1805
Florian Fainelli9f4ca052018-03-22 18:19:33 -07001806 ring->dim.bytes = bytes_processed;
1807 ring->dim.packets = rxpktprocessed;
1808
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001809 return rxpktprocessed;
1810}
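
/* Illustration only (hypothetical helper mirroring the arithmetic above):
 * the producer and consumer indices are free-running 16-bit hardware
 * counters, so the amount of pending work must be computed modulo 2^16.
 * For example, p_index == 0x0005 with c_index == 0xfffe yields 7 packets
 * even though p_index < c_index.
 */
static inline unsigned int example_ring_pending(unsigned int p_index,
						unsigned int c_index)
{
	return (p_index - c_index) & DMA_C_INDEX_MASK;
}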
1811
Petri Gynther3ab11332015-03-25 12:35:15 -07001812/* Rx NAPI polling method */
1813static int bcmgenet_rx_poll(struct napi_struct *napi, int budget)
1814{
Petri Gynther4055eae2015-03-25 12:35:16 -07001815 struct bcmgenet_rx_ring *ring = container_of(napi,
1816 struct bcmgenet_rx_ring, napi);
Yamin Friedmanf06d0ca2019-07-23 10:22:47 +03001817 struct dim_sample dim_sample = {};
Petri Gynther3ab11332015-03-25 12:35:15 -07001818 unsigned int work_done;
1819
Petri Gynther4055eae2015-03-25 12:35:16 -07001820 work_done = bcmgenet_desc_rx(ring, budget);
Petri Gynther3ab11332015-03-25 12:35:15 -07001821
1822 if (work_done < budget) {
Eric Dumazeteb96ce02016-04-08 22:06:40 -07001823 napi_complete_done(napi, work_done);
Petri Gynther4055eae2015-03-25 12:35:16 -07001824 ring->int_enable(ring);
Petri Gynther3ab11332015-03-25 12:35:15 -07001825 }
1826
Florian Fainelli9f4ca052018-03-22 18:19:33 -07001827 if (ring->dim.use_dim) {
Tal Gilboa8960b382019-01-31 16:44:48 +02001828 dim_update_sample(ring->dim.event_ctr, ring->dim.packets,
1829 ring->dim.bytes, &dim_sample);
Florian Fainelli9f4ca052018-03-22 18:19:33 -07001830 net_dim(&ring->dim.dim, dim_sample);
1831 }
1832
Petri Gynther3ab11332015-03-25 12:35:15 -07001833 return work_done;
1834}
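
/* Note (illustrative): when use_dim is set, each poll feeds its packet
 * and byte counts into net_dim() above; the DIM core may respond by
 * scheduling bcmgenet_dim_work() below, which re-programs this ring's
 * interrupt coalescing parameters.
 */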
1835
Florian Fainelli9f4ca052018-03-22 18:19:33 -07001836static void bcmgenet_dim_work(struct work_struct *work)
1837{
Tal Gilboa8960b382019-01-31 16:44:48 +02001838 struct dim *dim = container_of(work, struct dim, work);
Florian Fainelli9f4ca052018-03-22 18:19:33 -07001839 struct bcmgenet_net_dim *ndim =
1840 container_of(dim, struct bcmgenet_net_dim, dim);
1841 struct bcmgenet_rx_ring *ring =
1842 container_of(ndim, struct bcmgenet_rx_ring, dim);
Tal Gilboa8960b382019-01-31 16:44:48 +02001843 struct dim_cq_moder cur_profile =
Tal Gilboa026a8072018-04-24 13:36:01 +03001844 net_dim_get_rx_moderation(dim->mode, dim->profile_ix);
Florian Fainelli9f4ca052018-03-22 18:19:33 -07001845
Florian Fainelli5e6ce1f2018-03-28 15:15:38 -07001846 bcmgenet_set_rx_coalesce(ring, cur_profile.usec, cur_profile.pkts);
Tal Gilboac002bd52018-11-05 12:07:52 +02001847 dim->state = DIM_START_MEASURE;
Florian Fainelli9f4ca052018-03-22 18:19:33 -07001848}
1849
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001850/* Assign skb to RX DMA descriptor. */
Petri Gynther8ac467e2015-03-09 13:40:00 -07001851static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv,
1852 struct bcmgenet_rx_ring *ring)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001853{
1854 struct enet_cb *cb;
Petri Gyntherd6707be2015-03-12 15:48:00 -07001855 struct sk_buff *skb;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001856 int i;
1857
Petri Gynther8ac467e2015-03-09 13:40:00 -07001858 netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001859
1860 /* loop here for each buffer needing assign */
Petri Gynther8ac467e2015-03-09 13:40:00 -07001861 for (i = 0; i < ring->size; i++) {
1862 cb = ring->cbs + i;
Petri Gyntherd6707be2015-03-12 15:48:00 -07001863 skb = bcmgenet_rx_refill(priv, cb);
1864 if (skb)
Florian Fainellid4fec852017-08-24 15:56:29 -07001865 dev_consume_skb_any(skb);
Petri Gyntherd6707be2015-03-12 15:48:00 -07001866 if (!cb->skb)
1867 return -ENOMEM;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001868 }
1869
Petri Gyntherd6707be2015-03-12 15:48:00 -07001870 return 0;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001871}
1872
1873static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
1874{
Doug Bergerf48bed12017-07-14 16:12:10 -07001875 struct sk_buff *skb;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001876 struct enet_cb *cb;
1877 int i;
1878
1879 for (i = 0; i < priv->num_rx_bds; i++) {
1880 cb = &priv->rx_cbs[i];
1881
Doug Bergerf48bed12017-07-14 16:12:10 -07001882 skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb);
1883 if (skb)
Florian Fainellid4fec852017-08-24 15:56:29 -07001884 dev_consume_skb_any(skb);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001885 }
1886}
1887
Florian Fainellic91b7f62014-07-23 10:42:12 -07001888static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
Florian Fainellie29585b2014-07-21 15:29:20 -07001889{
1890 u32 reg;
1891
1892 reg = bcmgenet_umac_readl(priv, UMAC_CMD);
Doug Berger88f6c8b2020-03-16 14:44:56 -07001893 if (reg & CMD_SW_RESET)
1894 return;
Florian Fainellie29585b2014-07-21 15:29:20 -07001895 if (enable)
1896 reg |= mask;
1897 else
1898 reg &= ~mask;
1899 bcmgenet_umac_writel(priv, reg, UMAC_CMD);
1900
1901 /* UniMAC stops on a packet boundary, wait for a full-size packet
1902 * to be processed
1903 */
1904 if (enable == 0)
1905 usleep_range(1000, 2000);
1906}
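
/* Usage sketch (illustrative; mirrors calls made elsewhere in this
 * driver): both MAC directions are toggled with a single
 * read-modify-write of UMAC_CMD, e.g.
 *
 *   umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
 *   ...
 *   umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
 */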
1907
Doug Berger28c2d1a2017-10-25 15:04:13 -07001908static void reset_umac(struct bcmgenet_priv *priv)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001909{
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001910 /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
1911 bcmgenet_rbuf_ctrl_set(priv, 0);
1912 udelay(10);
1913
Doug Berger88f6c8b2020-03-16 14:44:56 -07001914 /* issue soft reset and disable MAC while updating its registers */
1915 bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
Doug Berger612eb1c2020-03-16 14:44:55 -07001916 udelay(2);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001917}
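
/* Note (illustrative): CMD_SW_RESET is deliberately left set here;
 * umac_enable_set() above refuses to modify UMAC_CMD while the bit is
 * present, which keeps the MAC quiescent until it is reconfigured.
 */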
1918
Florian Fainelli909ff5e2014-07-21 15:29:21 -07001919static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
1920{
1921 /* Mask all interrupts. */
1922 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1923 bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
Florian Fainelli909ff5e2014-07-21 15:29:21 -07001924 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
1925 bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
Florian Fainelli909ff5e2014-07-21 15:29:21 -07001926}
1927
Florian Fainelli37850e32015-10-17 14:22:46 -07001928static void bcmgenet_link_intr_enable(struct bcmgenet_priv *priv)
1929{
1930 u32 int0_enable = 0;
1931
1932 /* Monitor cable plug/unplug events for the internal PHY, external PHY
1933 * and MoCA PHY
1934 */
1935 if (priv->internal_phy) {
1936 int0_enable |= UMAC_IRQ_LINK_EVENT;
Doug Berger25382b92019-10-16 16:06:32 -07001937 if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
1938 int0_enable |= UMAC_IRQ_PHY_DET_R;
Florian Fainelli37850e32015-10-17 14:22:46 -07001939 } else if (priv->ext_phy) {
1940 int0_enable |= UMAC_IRQ_LINK_EVENT;
1941 } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
1942 if (priv->hw_params->flags & GENET_HAS_MOCA_LINK_DET)
1943 int0_enable |= UMAC_IRQ_LINK_EVENT;
1944 }
1945 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
1946}
1947
Doug Berger28c2d1a2017-10-25 15:04:13 -07001948static void init_umac(struct bcmgenet_priv *priv)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001949{
1950 struct device *kdev = &priv->pdev->dev;
Petri Gyntherb2e97ec2015-03-25 12:35:12 -07001951 u32 reg;
1952 u32 int0_enable = 0;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001953
1954 dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
1955
Doug Berger28c2d1a2017-10-25 15:04:13 -07001956 reset_umac(priv);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001957
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001958 /* clear tx/rx counter */
1959 bcmgenet_umac_writel(priv,
Florian Fainellic91b7f62014-07-23 10:42:12 -07001960 MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
1961 UMAC_MIB_CTRL);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001962 bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
1963
1964 bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
1965
Doug Berger9a9ba2a2020-03-17 17:05:36 -07001966 /* init tx registers, enable TSB */
1967 reg = bcmgenet_tbuf_ctrl_get(priv);
1968 reg |= TBUF_64B_EN;
1969 bcmgenet_tbuf_ctrl_set(priv, reg);
1970
1971 /* init rx registers, enable ip header optimization and RSB */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001972 reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
Doug Berger9a9ba2a2020-03-17 17:05:36 -07001973 reg |= RBUF_ALIGN_2B | RBUF_64B_EN;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001974 bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
1975
Doug Berger9a9ba2a2020-03-17 17:05:36 -07001976 /* enable rx checksumming */
1977 reg = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
1978 reg |= RBUF_RXCHK_EN | RBUF_L3_PARSE_DIS;
1979 /* If UniMAC forwards CRC, we need to skip over it to get
1980 * a valid CHK bit to be set in the per-packet status word
1981 */
1982 if (priv->crc_fwd_en)
1983 reg |= RBUF_SKIP_FCS;
1984 else
1985 reg &= ~RBUF_SKIP_FCS;
1986 bcmgenet_rbuf_writel(priv, reg, RBUF_CHK_CTRL);
1987
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001988 if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
1989 bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
1990
Florian Fainelli909ff5e2014-07-21 15:29:21 -07001991 bcmgenet_intr_disable(priv);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001992
Florian Fainelli37850e32015-10-17 14:22:46 -07001993 /* Configure backpressure vectors for MoCA */
1994 if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
Florian Fainelli1c1008c2014-02-13 16:08:47 -08001995 reg = bcmgenet_bp_mc_get(priv);
1996 reg |= BIT(priv->hw_params->bp_in_en_shift);
1997
1998 /* bp_mask: back pressure mask */
1999 if (netif_is_multiqueue(priv->dev))
2000 reg |= priv->hw_params->bp_in_mask;
2001 else
2002 reg &= ~priv->hw_params->bp_in_mask;
2003 bcmgenet_bp_mc_set(priv, reg);
2004 }
2005
2006 /* Enable MDIO interrupts on GENET v3+ */
2007 if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
Petri Gyntherb2e97ec2015-03-25 12:35:12 -07002008 int0_enable |= (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002009
Petri Gyntherb2e97ec2015-03-25 12:35:12 -07002010 bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
Jaedon Shin4092e6a2015-02-28 11:48:26 +09002011
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002012 dev_dbg(kdev, "done init umac\n");
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002013}
2014
Florian Fainelli5e6ce1f2018-03-28 15:15:38 -07002015static void bcmgenet_init_dim(struct bcmgenet_rx_ring *ring,
Florian Fainelli9f4ca052018-03-22 18:19:33 -07002016 void (*cb)(struct work_struct *work))
2017{
Florian Fainelli5e6ce1f2018-03-28 15:15:38 -07002018 struct bcmgenet_net_dim *dim = &ring->dim;
2019
Florian Fainelli9f4ca052018-03-22 18:19:33 -07002020 INIT_WORK(&dim->dim.work, cb);
Tal Gilboac002bd52018-11-05 12:07:52 +02002021 dim->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE;
Florian Fainelli9f4ca052018-03-22 18:19:33 -07002022 dim->event_ctr = 0;
2023 dim->packets = 0;
2024 dim->bytes = 0;
2025}
2026
Florian Fainelli5e6ce1f2018-03-28 15:15:38 -07002027static void bcmgenet_init_rx_coalesce(struct bcmgenet_rx_ring *ring)
2028{
2029 struct bcmgenet_net_dim *dim = &ring->dim;
Tal Gilboa8960b382019-01-31 16:44:48 +02002030 struct dim_cq_moder moder;
Florian Fainelli5e6ce1f2018-03-28 15:15:38 -07002031 u32 usecs, pkts;
2032
2033 usecs = ring->rx_coalesce_usecs;
2034 pkts = ring->rx_max_coalesced_frames;
2035
2036 /* If DIM was enabled, re-apply default parameters */
2037 if (dim->use_dim) {
Tal Gilboa026a8072018-04-24 13:36:01 +03002038 moder = net_dim_get_def_rx_moderation(dim->dim.mode);
Florian Fainelli5e6ce1f2018-03-28 15:15:38 -07002039 usecs = moder.usec;
2040 pkts = moder.pkts;
2041 }
2042
2043 bcmgenet_set_rx_coalesce(ring, usecs, pkts);
2044}
2045
Petri Gynther4f8b2d72015-02-23 11:00:45 -08002046/* Initialize a Tx ring along with corresponding hardware registers */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002047static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
2048 unsigned int index, unsigned int size,
Petri Gynther4f8b2d72015-02-23 11:00:45 -08002049 unsigned int start_ptr, unsigned int end_ptr)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002050{
2051 struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
2052 u32 words_per_bd = WORDS_PER_BD(priv);
2053 u32 flow_period_val = 0;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002054
2055 spin_lock_init(&ring->lock);
Jaedon Shin4092e6a2015-02-28 11:48:26 +09002056 ring->priv = priv;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002057 ring->index = index;
2058 if (index == DESC_INDEX) {
2059 ring->queue = 0;
2060 ring->int_enable = bcmgenet_tx_ring16_int_enable;
2061 ring->int_disable = bcmgenet_tx_ring16_int_disable;
2062 } else {
2063 ring->queue = index + 1;
2064 ring->int_enable = bcmgenet_tx_ring_int_enable;
2065 ring->int_disable = bcmgenet_tx_ring_int_disable;
2066 }
Petri Gynther4f8b2d72015-02-23 11:00:45 -08002067 ring->cbs = priv->tx_cbs + start_ptr;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002068 ring->size = size;
Petri Gynther66d06752015-03-04 14:30:01 -08002069 ring->clean_ptr = start_ptr;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002070 ring->c_index = 0;
2071 ring->free_bds = size;
Petri Gynther4f8b2d72015-02-23 11:00:45 -08002072 ring->write_ptr = start_ptr;
2073 ring->cb_ptr = start_ptr;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002074 ring->end_ptr = end_ptr - 1;
2075 ring->prod_index = 0;
2076
2077 /* Set flow period for ring != 16 */
2078 if (index != DESC_INDEX)
2079 flow_period_val = ENET_MAX_MTU_SIZE << 16;
2080
2081 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
2082 bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
2083 bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
2084 /* Disable rate control for now */
2085 bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002086 TDMA_FLOW_PERIOD);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002087 bcmgenet_tdma_ring_writel(priv, index,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002088 ((size << DMA_RING_SIZE_SHIFT) |
2089 RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002090
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002091 /* Set start and end address, read and write pointers */
Petri Gynther4f8b2d72015-02-23 11:00:45 -08002092 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002093 DMA_START_ADDR);
Petri Gynther4f8b2d72015-02-23 11:00:45 -08002094 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002095 TDMA_READ_PTR);
Petri Gynther4f8b2d72015-02-23 11:00:45 -08002096 bcmgenet_tdma_ring_writel(priv, index, start_ptr * words_per_bd,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002097 TDMA_WRITE_PTR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002098 bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002099 DMA_END_ADDR);
Doug Berger75879352017-10-25 15:04:14 -07002100
2101 /* Initialize Tx NAPI */
Florian Fainelli148965d2020-01-23 09:49:34 -08002102 netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll,
2103 NAPI_POLL_WEIGHT);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002104}
2105
2106/* Initialize a RDMA ring */
2107static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
Petri Gynther8ac467e2015-03-09 13:40:00 -07002108 unsigned int index, unsigned int size,
2109 unsigned int start_ptr, unsigned int end_ptr)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002110{
Petri Gynther8ac467e2015-03-09 13:40:00 -07002111 struct bcmgenet_rx_ring *ring = &priv->rx_rings[index];
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002112 u32 words_per_bd = WORDS_PER_BD(priv);
2113 int ret;
2114
Petri Gynther4055eae2015-03-25 12:35:16 -07002115 ring->priv = priv;
Petri Gynther8ac467e2015-03-09 13:40:00 -07002116 ring->index = index;
Petri Gynther4055eae2015-03-25 12:35:16 -07002117 if (index == DESC_INDEX) {
2118 ring->int_enable = bcmgenet_rx_ring16_int_enable;
2119 ring->int_disable = bcmgenet_rx_ring16_int_disable;
2120 } else {
2121 ring->int_enable = bcmgenet_rx_ring_int_enable;
2122 ring->int_disable = bcmgenet_rx_ring_int_disable;
2123 }
Petri Gynther8ac467e2015-03-09 13:40:00 -07002124 ring->cbs = priv->rx_cbs + start_ptr;
2125 ring->size = size;
2126 ring->c_index = 0;
2127 ring->read_ptr = start_ptr;
2128 ring->cb_ptr = start_ptr;
2129 ring->end_ptr = end_ptr - 1;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002130
Petri Gynther8ac467e2015-03-09 13:40:00 -07002131 ret = bcmgenet_alloc_rx_buffers(priv, ring);
2132 if (ret)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002133 return ret;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002134
Florian Fainelli5e6ce1f2018-03-28 15:15:38 -07002135 bcmgenet_init_dim(ring, bcmgenet_dim_work);
2136 bcmgenet_init_rx_coalesce(ring);
Florian Fainelli9f4ca052018-03-22 18:19:33 -07002137
Doug Berger75879352017-10-25 15:04:14 -07002138 /* Initialize Rx NAPI */
2139 netif_napi_add(priv->dev, &ring->napi, bcmgenet_rx_poll,
2140 NAPI_POLL_WEIGHT);
2141
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002142 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
2143 bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
2144 bcmgenet_rdma_ring_writel(priv, index,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002145 ((size << DMA_RING_SIZE_SHIFT) |
2146 RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002147 bcmgenet_rdma_ring_writel(priv, index,
Florian Fainellic91b7f62014-07-23 10:42:12 -07002148 (DMA_FC_THRESH_LO <<
2149 DMA_XOFF_THRESHOLD_SHIFT) |
2150 DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
Petri Gynther6f5a2722015-03-06 13:45:00 -08002151
2152 /* Set start and end address, read and write pointers */
Petri Gynther8ac467e2015-03-09 13:40:00 -07002153 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2154 DMA_START_ADDR);
2155 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2156 RDMA_READ_PTR);
2157 bcmgenet_rdma_ring_writel(priv, index, start_ptr * words_per_bd,
2158 RDMA_WRITE_PTR);
2159 bcmgenet_rdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
Petri Gynther6f5a2722015-03-06 13:45:00 -08002160 DMA_END_ADDR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002161
2162 return ret;
2163}
2164
Petri Gynthere2aadb42015-03-25 12:35:14 -07002165static void bcmgenet_enable_tx_napi(struct bcmgenet_priv *priv)
2166{
2167 unsigned int i;
2168 struct bcmgenet_tx_ring *ring;
2169
2170 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2171 ring = &priv->tx_rings[i];
2172 napi_enable(&ring->napi);
Doug Bergerfbf557d2017-10-25 15:04:15 -07002173 ring->int_enable(ring);
Petri Gynthere2aadb42015-03-25 12:35:14 -07002174 }
2175
2176 ring = &priv->tx_rings[DESC_INDEX];
2177 napi_enable(&ring->napi);
Doug Bergerfbf557d2017-10-25 15:04:15 -07002178 ring->int_enable(ring);
Petri Gynthere2aadb42015-03-25 12:35:14 -07002179}
2180
2181static void bcmgenet_disable_tx_napi(struct bcmgenet_priv *priv)
2182{
2183 unsigned int i;
2184 struct bcmgenet_tx_ring *ring;
2185
2186 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2187 ring = &priv->tx_rings[i];
2188 napi_disable(&ring->napi);
2189 }
2190
2191 ring = &priv->tx_rings[DESC_INDEX];
2192 napi_disable(&ring->napi);
2193}
2194
2195static void bcmgenet_fini_tx_napi(struct bcmgenet_priv *priv)
2196{
2197 unsigned int i;
2198 struct bcmgenet_tx_ring *ring;
2199
2200 for (i = 0; i < priv->hw_params->tx_queues; ++i) {
2201 ring = &priv->tx_rings[i];
2202 netif_napi_del(&ring->napi);
2203 }
2204
2205 ring = &priv->tx_rings[DESC_INDEX];
2206 netif_napi_del(&ring->napi);
2207}
2208
Petri Gynther16c6d662015-02-23 11:00:45 -08002209/* Initialize Tx queues
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002210 *
Petri Gynther16c6d662015-02-23 11:00:45 -08002211 * Queues 0-3 are priority-based, each one has 32 descriptors,
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002212 * with queue 0 being the highest priority queue.
2213 *
Petri Gynther16c6d662015-02-23 11:00:45 -08002214 * Queue 16 is the default Tx queue with
Petri Gynther51a966a2015-02-23 11:00:46 -08002215 * GENET_Q16_TX_BD_CNT = 256 - 4 * 32 = 128 descriptors.
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002216 *
Petri Gynther16c6d662015-02-23 11:00:45 -08002217 * The transmit control block pool is then partitioned as follows:
2218 * - Tx queue 0 uses tx_cbs[0..31]
2219 * - Tx queue 1 uses tx_cbs[32..63]
2220 * - Tx queue 2 uses tx_cbs[64..95]
2221 * - Tx queue 3 uses tx_cbs[96..127]
2222 * - Tx queue 16 uses tx_cbs[128..255]
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002223 */
Petri Gynther16c6d662015-02-23 11:00:45 -08002224static void bcmgenet_init_tx_queues(struct net_device *dev)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002225{
2226 struct bcmgenet_priv *priv = netdev_priv(dev);
Petri Gynther16c6d662015-02-23 11:00:45 -08002227 u32 i, dma_enable;
2228 u32 dma_ctrl, ring_cfg;
Petri Gynther37742162014-10-07 09:30:01 -07002229 u32 dma_priority[3] = {0, 0, 0};
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002230
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002231 dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
2232 dma_enable = dma_ctrl & DMA_EN;
2233 dma_ctrl &= ~DMA_EN;
2234 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
2235
Petri Gynther16c6d662015-02-23 11:00:45 -08002236 dma_ctrl = 0;
2237 ring_cfg = 0;
2238
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002239 /* Enable strict priority arbiter mode */
2240 bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
2241
Petri Gynther16c6d662015-02-23 11:00:45 -08002242 /* Initialize Tx priority queues */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002243 for (i = 0; i < priv->hw_params->tx_queues; i++) {
Petri Gynther51a966a2015-02-23 11:00:46 -08002244 bcmgenet_init_tx_ring(priv, i, priv->hw_params->tx_bds_per_q,
2245 i * priv->hw_params->tx_bds_per_q,
2246 (i + 1) * priv->hw_params->tx_bds_per_q);
Petri Gynther16c6d662015-02-23 11:00:45 -08002247 ring_cfg |= (1 << i);
2248 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
Petri Gynther37742162014-10-07 09:30:01 -07002249 dma_priority[DMA_PRIO_REG_INDEX(i)] |=
2250 ((GENET_Q0_PRIORITY + i) << DMA_PRIO_REG_SHIFT(i));
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002251 }
2252
Petri Gynther16c6d662015-02-23 11:00:45 -08002253 /* Initialize Tx default queue 16 */
Petri Gynther51a966a2015-02-23 11:00:46 -08002254 bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_Q16_TX_BD_CNT,
Petri Gynther16c6d662015-02-23 11:00:45 -08002255 priv->hw_params->tx_queues *
Petri Gynther51a966a2015-02-23 11:00:46 -08002256 priv->hw_params->tx_bds_per_q,
Petri Gynther16c6d662015-02-23 11:00:45 -08002257 TOTAL_DESC);
2258 ring_cfg |= (1 << DESC_INDEX);
2259 dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
Petri Gynther37742162014-10-07 09:30:01 -07002260 dma_priority[DMA_PRIO_REG_INDEX(DESC_INDEX)] |=
2261 ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) <<
2262 DMA_PRIO_REG_SHIFT(DESC_INDEX));
Petri Gynther16c6d662015-02-23 11:00:45 -08002263
2264 /* Set Tx queue priorities */
Petri Gynther37742162014-10-07 09:30:01 -07002265 bcmgenet_tdma_writel(priv, dma_priority[0], DMA_PRIORITY_0);
2266 bcmgenet_tdma_writel(priv, dma_priority[1], DMA_PRIORITY_1);
2267 bcmgenet_tdma_writel(priv, dma_priority[2], DMA_PRIORITY_2);
2268
Petri Gynther16c6d662015-02-23 11:00:45 -08002269 /* Enable Tx queues */
2270 bcmgenet_tdma_writel(priv, ring_cfg, DMA_RING_CFG);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002271
Petri Gynther16c6d662015-02-23 11:00:45 -08002272 /* Enable Tx DMA */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002273 if (dma_enable)
Petri Gynther16c6d662015-02-23 11:00:45 -08002274 dma_ctrl |= DMA_EN;
2275 bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002276}
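
/* Worked example (illustration only, assuming the common GENET layout of
 * tx_queues == 4 and DESC_INDEX == 16): the accumulated enable masks end
 * up as ring_cfg == 0x0001000f (rings 0-3 plus ring 16), with dma_ctrl
 * carrying the same bits shifted by DMA_RING_BUF_EN_SHIFT, plus DMA_EN
 * if the engine was running on entry.
 */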
2277
Petri Gynther3ab11332015-03-25 12:35:15 -07002278static void bcmgenet_enable_rx_napi(struct bcmgenet_priv *priv)
2279{
Petri Gynther4055eae2015-03-25 12:35:16 -07002280 unsigned int i;
2281 struct bcmgenet_rx_ring *ring;
2282
2283 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2284 ring = &priv->rx_rings[i];
2285 napi_enable(&ring->napi);
Doug Bergerfbf557d2017-10-25 15:04:15 -07002286 ring->int_enable(ring);
Petri Gynther4055eae2015-03-25 12:35:16 -07002287 }
2288
2289 ring = &priv->rx_rings[DESC_INDEX];
2290 napi_enable(&ring->napi);
Doug Bergerfbf557d2017-10-25 15:04:15 -07002291 ring->int_enable(ring);
Petri Gynther3ab11332015-03-25 12:35:15 -07002292}
2293
2294static void bcmgenet_disable_rx_napi(struct bcmgenet_priv *priv)
2295{
Petri Gynther4055eae2015-03-25 12:35:16 -07002296 unsigned int i;
2297 struct bcmgenet_rx_ring *ring;
2298
2299 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2300 ring = &priv->rx_rings[i];
2301 napi_disable(&ring->napi);
Florian Fainelli9f4ca052018-03-22 18:19:33 -07002302 cancel_work_sync(&ring->dim.dim.work);
Petri Gynther4055eae2015-03-25 12:35:16 -07002303 }
2304
2305 ring = &priv->rx_rings[DESC_INDEX];
2306 napi_disable(&ring->napi);
Florian Fainelli9f4ca052018-03-22 18:19:33 -07002307 cancel_work_sync(&ring->dim.dim.work);
Petri Gynther3ab11332015-03-25 12:35:15 -07002308}
2309
2310static void bcmgenet_fini_rx_napi(struct bcmgenet_priv *priv)
2311{
Petri Gynther4055eae2015-03-25 12:35:16 -07002312 unsigned int i;
2313 struct bcmgenet_rx_ring *ring;
2314
2315 for (i = 0; i < priv->hw_params->rx_queues; ++i) {
2316 ring = &priv->rx_rings[i];
2317 netif_napi_del(&ring->napi);
2318 }
2319
2320 ring = &priv->rx_rings[DESC_INDEX];
2321 netif_napi_del(&ring->napi);
Petri Gynther3ab11332015-03-25 12:35:15 -07002322}
2323
Petri Gynther8ac467e2015-03-09 13:40:00 -07002324/* Initialize Rx queues
2325 *
2326 * Queues 0-15 are priority queues. Hardware Filtering Block (HFB) can be
2327 * used to direct traffic to these queues.
2328 *
2329 * Queue 16 is the default Rx queue with GENET_Q16_RX_BD_CNT descriptors.
2330 */
2331static int bcmgenet_init_rx_queues(struct net_device *dev)
2332{
2333 struct bcmgenet_priv *priv = netdev_priv(dev);
2334 u32 i;
2335 u32 dma_enable;
2336 u32 dma_ctrl;
2337 u32 ring_cfg;
2338 int ret;
2339
2340 dma_ctrl = bcmgenet_rdma_readl(priv, DMA_CTRL);
2341 dma_enable = dma_ctrl & DMA_EN;
2342 dma_ctrl &= ~DMA_EN;
2343 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2344
2345 dma_ctrl = 0;
2346 ring_cfg = 0;
2347
2348 /* Initialize Rx priority queues */
2349 for (i = 0; i < priv->hw_params->rx_queues; i++) {
2350 ret = bcmgenet_init_rx_ring(priv, i,
2351 priv->hw_params->rx_bds_per_q,
2352 i * priv->hw_params->rx_bds_per_q,
2353 (i + 1) *
2354 priv->hw_params->rx_bds_per_q);
2355 if (ret)
2356 return ret;
2357
2358 ring_cfg |= (1 << i);
2359 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2360 }
2361
2362 /* Initialize Rx default queue 16 */
2363 ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, GENET_Q16_RX_BD_CNT,
2364 priv->hw_params->rx_queues *
2365 priv->hw_params->rx_bds_per_q,
2366 TOTAL_DESC);
2367 if (ret)
2368 return ret;
2369
2370 ring_cfg |= (1 << DESC_INDEX);
2371 dma_ctrl |= (1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT));
2372
2373 /* Enable rings */
2374 bcmgenet_rdma_writel(priv, ring_cfg, DMA_RING_CFG);
2375
2376 /* Configure rings as descriptor rings and re-enable DMA if it was enabled */
2377 if (dma_enable)
2378 dma_ctrl |= DMA_EN;
2379 bcmgenet_rdma_writel(priv, dma_ctrl, DMA_CTRL);
2380
2381 return 0;
2382}
2383
Florian Fainelli4a0c081e2014-09-22 11:54:43 -07002384static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
2385{
2386 int ret = 0;
2387 int timeout = 0;
2388 u32 reg;
Jaedon Shinb6df7d62015-08-21 10:08:26 +09002389 u32 dma_ctrl;
2390 int i;
Florian Fainelli4a0c081e2014-09-22 11:54:43 -07002391
2392 /* Disable TDMA to stop more frames from being added to Tx DMA */
2393 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2394 reg &= ~DMA_EN;
2395 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2396
2397 /* Check TDMA status register to confirm TDMA is disabled */
2398 while (timeout++ < DMA_TIMEOUT_VAL) {
2399 reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
2400 if (reg & DMA_DISABLED)
2401 break;
2402
2403 udelay(1);
2404 }
2405
2406 if (timeout == DMA_TIMEOUT_VAL) {
2407 netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
2408 ret = -ETIMEDOUT;
2409 }
2410
2411 /* Wait 10ms for packets to drain in both Tx and Rx DMA */
2412 usleep_range(10000, 20000);
2413
2414 /* Disable RDMA */
2415 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2416 reg &= ~DMA_EN;
2417 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2418
2419 timeout = 0;
2420 /* Check RDMA status register to confirm RDMA is disabled */
2421 while (timeout++ < DMA_TIMEOUT_VAL) {
2422 reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
2423 if (reg & DMA_DISABLED)
2424 break;
2425
2426 udelay(1);
2427 }
2428
2429 if (timeout == DMA_TIMEOUT_VAL) {
2430 netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
2431 ret = -ETIMEDOUT;
2432 }
2433
Jaedon Shinb6df7d62015-08-21 10:08:26 +09002434 dma_ctrl = 0;
2435 for (i = 0; i < priv->hw_params->rx_queues; i++)
2436 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2437 reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
2438 reg &= ~dma_ctrl;
2439 bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
2440
2441 dma_ctrl = 0;
2442 for (i = 0; i < priv->hw_params->tx_queues; i++)
2443 dma_ctrl |= (1 << (i + DMA_RING_BUF_EN_SHIFT));
2444 reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
2445 reg &= ~dma_ctrl;
2446 bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
2447
Florian Fainelli4a0c081e2014-09-22 11:54:43 -07002448 return ret;
2449}
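
/* Note (illustrative): the teardown order above matters.  Tx DMA is
 * stopped first so no new frames enter the pipeline, the 10ms sleep lets
 * in-flight packets drain, Rx DMA is stopped next, and finally the
 * per-ring buffer enables are cleared in both directions.
 */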
2450
Petri Gynther9abab962015-03-30 00:29:01 -07002451static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002452{
Petri Gynthere178c8c2016-04-09 00:20:36 -07002453 struct netdev_queue *txq;
Doug Bergerf48bed12017-07-14 16:12:10 -07002454 int i;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002455
Petri Gynther9abab962015-03-30 00:29:01 -07002456 bcmgenet_fini_rx_napi(priv);
2457 bcmgenet_fini_tx_napi(priv);
2458
Markus Elfring399e06a2019-08-22 20:02:56 +02002459 for (i = 0; i < priv->num_tx_bds; i++)
2460 dev_kfree_skb(bcmgenet_free_tx_cb(&priv->pdev->dev,
2461 priv->tx_cbs + i));
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002462
Petri Gynthere178c8c2016-04-09 00:20:36 -07002463 for (i = 0; i < priv->hw_params->tx_queues; i++) {
2464 txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[i].queue);
2465 netdev_tx_reset_queue(txq);
2466 }
2467
2468 txq = netdev_get_tx_queue(priv->dev, priv->tx_rings[DESC_INDEX].queue);
2469 netdev_tx_reset_queue(txq);
2470
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002471 bcmgenet_free_rx_buffers(priv);
2472 kfree(priv->rx_cbs);
2473 kfree(priv->tx_cbs);
2474}
2475
2476/* init_edma: Initialize DMA control register */
2477static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
2478{
2479 int ret;
Petri Gynther014012a2015-02-23 11:00:45 -08002480 unsigned int i;
2481 struct enet_cb *cb;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002482
Petri Gynther6f5a2722015-03-06 13:45:00 -08002483 netif_dbg(priv, hw, priv->dev, "%s\n", __func__);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002484
Petri Gynther6f5a2722015-03-06 13:45:00 -08002485 /* Initialize common Rx ring structures */
2486 priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
2487 priv->num_rx_bds = TOTAL_DESC;
2488 priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
2489 GFP_KERNEL);
2490 if (!priv->rx_cbs)
2491 return -ENOMEM;
2492
2493 for (i = 0; i < priv->num_rx_bds; i++) {
2494 cb = priv->rx_cbs + i;
2495 cb->bd_addr = priv->rx_bds + i * DMA_DESC_SIZE;
2496 }
2497
Brian Norris7fc527f2014-07-29 14:34:14 -07002498 /* Initialize common Tx ring structures */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002499 priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
2500 priv->num_tx_bds = TOTAL_DESC;
Florian Fainellic489be02014-07-23 10:42:15 -07002501 priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
Florian Fainellic91b7f62014-07-23 10:42:12 -07002502 GFP_KERNEL);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002503 if (!priv->tx_cbs) {
Petri Gyntherebbd96f2015-03-25 12:35:11 -07002504 kfree(priv->rx_cbs);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002505 return -ENOMEM;
2506 }
2507
Petri Gynther014012a2015-02-23 11:00:45 -08002508 for (i = 0; i < priv->num_tx_bds; i++) {
2509 cb = priv->tx_cbs + i;
2510 cb->bd_addr = priv->tx_bds + i * DMA_DESC_SIZE;
2511 }
2512
Petri Gyntherebbd96f2015-03-25 12:35:11 -07002513 /* Init Rx DMA */
Stefan Wahrena50e3a92019-11-11 20:49:23 +01002514 bcmgenet_rdma_writel(priv, priv->dma_max_burst_length,
2515 DMA_SCB_BURST_SIZE);
Petri Gyntherebbd96f2015-03-25 12:35:11 -07002516
2517 /* Initialize Rx queues */
2518 ret = bcmgenet_init_rx_queues(priv->dev);
2519 if (ret) {
2520 netdev_err(priv->dev, "failed to initialize Rx queues\n");
2521 bcmgenet_free_rx_buffers(priv);
2522 kfree(priv->rx_cbs);
2523 kfree(priv->tx_cbs);
2524 return ret;
2525 }
2526
2527 /* Init Tx DMA */
Stefan Wahrena50e3a92019-11-11 20:49:23 +01002528 bcmgenet_tdma_writel(priv, priv->dma_max_burst_length,
2529 DMA_SCB_BURST_SIZE);
Petri Gyntherebbd96f2015-03-25 12:35:11 -07002530
Petri Gynther16c6d662015-02-23 11:00:45 -08002531 /* Initialize Tx queues */
2532 bcmgenet_init_tx_queues(priv->dev);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002533
2534 return 0;
2535}
2536
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002537/* Interrupt bottom half */
2538static void bcmgenet_irq_task(struct work_struct *work)
2539{
Doug Berger07c52d62017-03-09 16:58:47 -08002540 unsigned int status;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002541 struct bcmgenet_priv *priv = container_of(
2542 work, struct bcmgenet_priv, bcmgenet_irq_work);
2543
2544 netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
2545
Doug Bergerb0447ec2017-10-25 15:04:17 -07002546 spin_lock_irq(&priv->lock);
Doug Berger07c52d62017-03-09 16:58:47 -08002547 status = priv->irq0_stat;
2548 priv->irq0_stat = 0;
Doug Bergerb0447ec2017-10-25 15:04:17 -07002549 spin_unlock_irq(&priv->lock);
Doug Berger07c52d62017-03-09 16:58:47 -08002550
Doug Berger25382b92019-10-16 16:06:32 -07002551 if (status & UMAC_IRQ_PHY_DET_R &&
Doug Berger0686bd92019-11-05 11:07:26 -08002552 priv->dev->phydev->autoneg != AUTONEG_ENABLE) {
Doug Berger25382b92019-10-16 16:06:32 -07002553 phy_init_hw(priv->dev->phydev);
Doug Berger0686bd92019-11-05 11:07:26 -08002554 genphy_config_aneg(priv->dev->phydev);
2555 }
Doug Berger25382b92019-10-16 16:06:32 -07002556
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002557 /* Link UP/DOWN event */
Doug Berger7de48402019-10-16 16:06:29 -07002558 if (status & UMAC_IRQ_LINK_EVENT)
Heiner Kallweit28b2e0d2018-01-10 21:21:31 +01002559 phy_mac_interrupt(priv->dev->phydev);
Doug Berger25382b92019-10-16 16:06:32 -07002560
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002561}
2562
Petri Gynther4055eae2015-03-25 12:35:16 -07002563/* bcmgenet_isr1: handle Rx and Tx priority queues */
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002564static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
2565{
2566 struct bcmgenet_priv *priv = dev_id;
Petri Gynther4055eae2015-03-25 12:35:16 -07002567 struct bcmgenet_rx_ring *rx_ring;
2568 struct bcmgenet_tx_ring *tx_ring;
Doug Berger07c52d62017-03-09 16:58:47 -08002569 unsigned int index, status;
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002570
Doug Berger07c52d62017-03-09 16:58:47 -08002571 /* Read irq status */
2572 status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
Jaedon Shin4092e6a2015-02-28 11:48:26 +09002573 ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
Petri Gynther4055eae2015-03-25 12:35:16 -07002574
Brian Norris7fc527f2014-07-29 14:34:14 -07002575 /* clear interrupts */
Doug Berger07c52d62017-03-09 16:58:47 -08002576 bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002577
2578 netif_dbg(priv, intr, priv->dev,
Doug Berger07c52d62017-03-09 16:58:47 -08002579 "%s: IRQ=0x%x\n", __func__, status);
Jaedon Shin4092e6a2015-02-28 11:48:26 +09002580
Petri Gynther4055eae2015-03-25 12:35:16 -07002581 /* Check Rx priority queue interrupts */
2582 for (index = 0; index < priv->hw_params->rx_queues; index++) {
Doug Berger07c52d62017-03-09 16:58:47 -08002583 if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
Petri Gynther4055eae2015-03-25 12:35:16 -07002584 continue;
2585
2586 rx_ring = &priv->rx_rings[index];
Florian Fainelli9f4ca052018-03-22 18:19:33 -07002587 rx_ring->dim.event_ctr++;
Petri Gynther4055eae2015-03-25 12:35:16 -07002588
2589 if (likely(napi_schedule_prep(&rx_ring->napi))) {
2590 rx_ring->int_disable(rx_ring);
Florian Fainellidac916f2016-04-08 22:30:56 -07002591 __napi_schedule_irqoff(&rx_ring->napi);
Petri Gynther4055eae2015-03-25 12:35:16 -07002592 }
2593 }
2594
2595 /* Check Tx priority queue interrupts */
Jaedon Shin4092e6a2015-02-28 11:48:26 +09002596 for (index = 0; index < priv->hw_params->tx_queues; index++) {
Doug Berger07c52d62017-03-09 16:58:47 -08002597 if (!(status & BIT(index)))
Jaedon Shin4092e6a2015-02-28 11:48:26 +09002598 continue;
2599
Petri Gynther4055eae2015-03-25 12:35:16 -07002600 tx_ring = &priv->tx_rings[index];
Jaedon Shin4092e6a2015-02-28 11:48:26 +09002601
Petri Gynther4055eae2015-03-25 12:35:16 -07002602 if (likely(napi_schedule_prep(&tx_ring->napi))) {
2603 tx_ring->int_disable(tx_ring);
Florian Fainellidac916f2016-04-08 22:30:56 -07002604 __napi_schedule_irqoff(&tx_ring->napi);
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002605 }
2606 }
Jaedon Shin4092e6a2015-02-28 11:48:26 +09002607
Florian Fainelli1c1008c2014-02-13 16:08:47 -08002608 return IRQ_HANDLED;
2609}
2610
Petri Gynther4055eae2015-03-25 12:35:16 -07002611/* bcmgenet_isr0: handle Rx and Tx default queues + other stuff */
static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;
	struct bcmgenet_rx_ring *rx_ring;
	struct bcmgenet_tx_ring *tx_ring;
	unsigned int status;
	unsigned long flags;

	/* Read irq status */
	status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
		~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);

	/* clear interrupts */
	bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);

	netif_dbg(priv, intr, priv->dev,
		  "IRQ=0x%x\n", status);

	if (status & UMAC_IRQ_RXDMA_DONE) {
		rx_ring = &priv->rx_rings[DESC_INDEX];
		rx_ring->dim.event_ctr++;

		if (likely(napi_schedule_prep(&rx_ring->napi))) {
			rx_ring->int_disable(rx_ring);
			__napi_schedule_irqoff(&rx_ring->napi);
		}
	}

	if (status & UMAC_IRQ_TXDMA_DONE) {
		tx_ring = &priv->tx_rings[DESC_INDEX];

		if (likely(napi_schedule_prep(&tx_ring->napi))) {
			tx_ring->int_disable(tx_ring);
			__napi_schedule_irqoff(&tx_ring->napi);
		}
	}

	if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
	    status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
		wake_up(&priv->wq);
	}

	/* All other interrupts of interest are handled in the bottom half */
	status &= (UMAC_IRQ_LINK_EVENT | UMAC_IRQ_PHY_DET_R);
	if (status) {
		/* Save irq status for bottom-half processing. */
		spin_lock_irqsave(&priv->lock, flags);
		priv->irq0_stat |= status;
		spin_unlock_irqrestore(&priv->lock, flags);

		schedule_work(&priv->bcmgenet_irq_work);
	}

	return IRQ_HANDLED;
}

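/* bcmgenet_wol_isr: report a Wake-on-LAN interrupt as a PM wakeup event */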
static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
{
	struct bcmgenet_priv *priv = dev_id;

	pm_wakeup_event(&priv->pdev->dev, 0);

	return IRQ_HANDLED;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void bcmgenet_poll_controller(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Invoke the main RX/TX interrupt handler */
	disable_irq(priv->irq0);
	bcmgenet_isr0(priv->irq0, priv);
	enable_irq(priv->irq0);

	/* And the interrupt handler for RX/TX priority queues */
	disable_irq(priv->irq1);
	bcmgenet_isr1(priv->irq1, priv);
	enable_irq(priv->irq1);
}
#endif

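/* Pulse bit 1 of the RBUF control register (presumably the UniMAC
 * soft-reset bit, going by this function's name) with a 10 us settle
 * time on each edge.
 */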
static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
{
	u32 reg;

	reg = bcmgenet_rbuf_ctrl_get(priv);
	reg |= BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);

	reg &= ~BIT(1);
	bcmgenet_rbuf_ctrl_set(priv, reg);
	udelay(10);
}

static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
				 unsigned char *addr)
{
	bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
			(addr[2] << 8) | addr[3], UMAC_MAC0);
	bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
}

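/* The value bcmgenet_dma_disable() returns is the set of DMA_CTRL bits it
 * cleared (the ring 16 buffer enable plus DMA_EN), so a later call to
 * bcmgenet_enable_dma(priv, dma_ctrl) restores exactly what was running.
 */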
/* Returns a reusable dma control register value */
static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
{
	u32 reg;
	u32 dma_ctrl;

	/* disable DMA */
	dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg &= ~dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
	udelay(10);
	bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);

	return dma_ctrl;
}

static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
{
	u32 reg;

	reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_rdma_writel(priv, reg, DMA_CTRL);

	reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
	reg |= dma_ctrl;
	bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
}

/* bcmgenet_hfb_clear
 *
 * Clear Hardware Filter Block and disable all filtering.
 */
static void bcmgenet_hfb_clear(struct bcmgenet_priv *priv)
{
	u32 i;

	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_CTRL);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS);
	bcmgenet_hfb_reg_writel(priv, 0x0, HFB_FLT_ENABLE_V3PLUS + 4);

	for (i = DMA_INDEX2RING_0; i <= DMA_INDEX2RING_7; i++)
		bcmgenet_rdma_writel(priv, 0x0, i);

	for (i = 0; i < (priv->hw_params->hfb_filter_cnt / 4); i++)
		bcmgenet_hfb_reg_writel(priv, 0x0,
					HFB_FLT_LEN_V3PLUS + i * sizeof(u32));

	for (i = 0; i < priv->hw_params->hfb_filter_cnt *
			priv->hw_params->hfb_filter_size; i++)
		bcmgenet_hfb_writel(priv, 0x0, i * sizeof(u32));
}

static void bcmgenet_hfb_init(struct bcmgenet_priv *priv)
{
	if (GENET_IS_V1(priv) || GENET_IS_V2(priv))
		return;

	bcmgenet_hfb_clear(priv);
}

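/* Note the bring-up order below: Rx NAPI is enabled before the UniMAC
 * Rx/Tx enables, and the PHY is started last, once the MAC side is ready.
 */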
static void bcmgenet_netif_start(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	/* Start the network engine */
	bcmgenet_enable_rx_napi(priv);

	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);

	bcmgenet_enable_tx_napi(priv);

	/* Monitor link interrupts now */
	bcmgenet_link_intr_enable(priv);

	phy_start(dev->phydev);
}

static int bcmgenet_open(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	u32 reg;
	int ret;

	netif_dbg(priv, ifup, dev, "bcmgenet_open\n");

	/* Turn on the clock */
	clk_prepare_enable(priv->clk);

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	/* take MAC out of reset */
	bcmgenet_umac_reset(priv);

	init_umac(priv);

	/* Apply features again in case we changed them while interface was
	 * down
	 */
	bcmgenet_set_features(dev, dev->features);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

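	/* For the internal PHY, also turn on the energy detect function in
	 * the EXT power management register.
	 */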
	if (priv->internal_phy) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto err_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	/* HFB init */
	bcmgenet_hfb_init(priv);

	ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
		goto err_fini_dma;
	}

	ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
			  dev->name, priv);
	if (ret < 0) {
		netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
		goto err_irq0;
	}

	ret = bcmgenet_mii_probe(dev);
	if (ret) {
		netdev_err(dev, "failed to connect to PHY\n");
		goto err_irq1;
	}

	bcmgenet_netif_start(dev);

	netif_tx_start_all_queues(dev);

	return 0;

err_irq1:
	free_irq(priv->irq1, priv);
err_irq0:
	free_irq(priv->irq0, priv);
err_fini_dma:
	bcmgenet_dma_teardown(priv);
	bcmgenet_fini_dma(priv);
err_clk_disable:
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
	return ret;
}

static void bcmgenet_netif_stop(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);

	bcmgenet_disable_tx_napi(priv);
	netif_tx_disable(dev);

	/* Disable MAC receive */
	umac_enable_set(priv, CMD_RX_EN, false);

	bcmgenet_dma_teardown(priv);

	/* Disable MAC transmit. TX DMA must already be disabled at this point */
	umac_enable_set(priv, CMD_TX_EN, false);

	phy_stop(dev->phydev);
	bcmgenet_disable_rx_napi(priv);
	bcmgenet_intr_disable(priv);

	/* Wait for pending work items to complete. Since interrupts are
	 * disabled no new work will be scheduled.
	 */
	cancel_work_sync(&priv->bcmgenet_irq_work);

	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;

	/* tx reclaim */
	bcmgenet_tx_reclaim_all(dev);
	bcmgenet_fini_dma(priv);
}

static int bcmgenet_close(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret = 0;

	netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");

	bcmgenet_netif_stop(dev);

	/* Really kill the PHY state machine and disconnect from it */
	phy_disconnect(dev->phydev);

	free_irq(priv->irq0, priv);
	free_irq(priv->irq1, priv);

	if (priv->internal_phy)
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);

	clk_disable_unprepare(priv->clk);

	return ret;
}

static void bcmgenet_dump_tx_queue(struct bcmgenet_tx_ring *ring)
{
	struct bcmgenet_priv *priv = ring->priv;
	u32 p_index, c_index, intsts, intmsk;
	struct netdev_queue *txq;
	unsigned int free_bds;
	bool txq_stopped;

	if (!netif_msg_tx_err(priv))
		return;

	txq = netdev_get_tx_queue(priv->dev, ring->queue);

	spin_lock(&ring->lock);
	if (ring->index == DESC_INDEX) {
		intsts = ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = UMAC_IRQ_TXDMA_DONE | UMAC_IRQ_TXDMA_MBDONE;
	} else {
		intsts = ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
		intmsk = 1 << ring->index;
	}
	c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
	p_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_PROD_INDEX);
	txq_stopped = netif_tx_queue_stopped(txq);
	free_bds = ring->free_bds;
	spin_unlock(&ring->lock);

	netif_err(priv, tx_err, priv->dev, "Ring %d queue %d status summary\n"
		  "TX queue status: %s, interrupts: %s\n"
		  "(sw)free_bds: %d (sw)size: %d\n"
		  "(sw)p_index: %d (hw)p_index: %d\n"
		  "(sw)c_index: %d (hw)c_index: %d\n"
		  "(sw)clean_p: %d (sw)write_p: %d\n"
		  "(sw)cb_ptr: %d (sw)end_ptr: %d\n",
		  ring->index, ring->queue,
		  txq_stopped ? "stopped" : "active",
		  intsts & intmsk ? "enabled" : "disabled",
		  free_bds, ring->size,
		  ring->prod_index, p_index & DMA_P_INDEX_MASK,
		  ring->c_index, c_index & DMA_C_INDEX_MASK,
		  ring->clean_ptr, ring->write_ptr,
		  ring->cb_ptr, ring->end_ptr);
}

static void bcmgenet_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	u32 int0_enable = 0;
	u32 int1_enable = 0;
	unsigned int q;

	netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		bcmgenet_dump_tx_queue(&priv->tx_rings[q]);
	bcmgenet_dump_tx_queue(&priv->tx_rings[DESC_INDEX]);

	bcmgenet_tx_reclaim_all(dev);

	for (q = 0; q < priv->hw_params->tx_queues; q++)
		int1_enable |= (1 << q);

	int0_enable = UMAC_IRQ_TXDMA_DONE;

	/* Re-enable TX interrupts if disabled */
	bcmgenet_intrl2_0_writel(priv, int0_enable, INTRL2_CPU_MASK_CLEAR);
	bcmgenet_intrl2_1_writel(priv, int1_enable, INTRL2_CPU_MASK_CLEAR);

	netif_trans_update(dev);

	dev->stats.tx_errors++;

	netif_tx_wake_all_queues(dev);
}

#define MAX_MDF_FILTER	17

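/* Each MDF entry spans two consecutive UMAC_MDF_ADDR words: the first
 * holds the top two octets of the MAC address, the second the remaining
 * four, which is why *i advances by two per address.
 */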
static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
					 unsigned char *addr,
					 int *i)
{
	bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
			     UMAC_MDF_ADDR + (*i * 4));
	bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
			     addr[4] << 8 | addr[5],
			     UMAC_MDF_ADDR + ((*i + 1) * 4));
	*i += 2;
}

static void bcmgenet_set_rx_mode(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int i, nfilter;
	u32 reg;

	netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);

	/* Number of filters needed */
	nfilter = netdev_uc_count(dev) + netdev_mc_count(dev) + 2;

	/*
	 * Turn on promiscuous mode for three scenarios
	 * 1. IFF_PROMISC flag is set
	 * 2. IFF_ALLMULTI flag is set
	 * 3. The number of filters needed exceeds the number of filters
	 *    supported by the hardware.
	 */
	reg = bcmgenet_umac_readl(priv, UMAC_CMD);
	if ((dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) ||
	    (nfilter > MAX_MDF_FILTER)) {
		reg |= CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
		bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
		return;
	} else {
		reg &= ~CMD_PROMISC;
		bcmgenet_umac_writel(priv, reg, UMAC_CMD);
	}

	/* update MDF filter */
	i = 0;
	/* Broadcast */
	bcmgenet_set_mdf_addr(priv, dev->broadcast, &i);
	/* my own address */
	bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i);

	/* Unicast */
	netdev_for_each_uc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i);

	/* Multicast */
	netdev_for_each_mc_addr(ha, dev)
		bcmgenet_set_mdf_addr(priv, ha->addr, &i);

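	/* The enable bits written below are taken from the top of the
	 * 17-bit UMAC_MDF_CTRL field, presumably one bit per filter slot in
	 * the order the filters were programmed above.
	 */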
	/* Enable filters */
	reg = GENMASK(MAX_MDF_FILTER - 1, MAX_MDF_FILTER - nfilter);
	bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
}

/* Set the hardware MAC address. */
static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;

	/* Setting the MAC address at the hardware level is not possible
	 * without disabling the UniMAC RX/TX enable bits.
	 */
	if (netif_running(dev))
		return -EBUSY;

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

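/* Fold the per-ring software counters, including those of the default
 * ring 16, into the aggregate netdev statistics.
 */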
static struct net_device_stats *bcmgenet_get_stats(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long tx_bytes = 0, tx_packets = 0;
	unsigned long rx_bytes = 0, rx_packets = 0;
	unsigned long rx_errors = 0, rx_dropped = 0;
	struct bcmgenet_tx_ring *tx_ring;
	struct bcmgenet_rx_ring *rx_ring;
	unsigned int q;

	for (q = 0; q < priv->hw_params->tx_queues; q++) {
		tx_ring = &priv->tx_rings[q];
		tx_bytes += tx_ring->bytes;
		tx_packets += tx_ring->packets;
	}
	tx_ring = &priv->tx_rings[DESC_INDEX];
	tx_bytes += tx_ring->bytes;
	tx_packets += tx_ring->packets;

	for (q = 0; q < priv->hw_params->rx_queues; q++) {
		rx_ring = &priv->rx_rings[q];

		rx_bytes += rx_ring->bytes;
		rx_packets += rx_ring->packets;
		rx_errors += rx_ring->errors;
		rx_dropped += rx_ring->dropped;
	}
	rx_ring = &priv->rx_rings[DESC_INDEX];
	rx_bytes += rx_ring->bytes;
	rx_packets += rx_ring->packets;
	rx_errors += rx_ring->errors;
	rx_dropped += rx_ring->dropped;

	dev->stats.tx_bytes = tx_bytes;
	dev->stats.tx_packets = tx_packets;
	dev->stats.rx_bytes = rx_bytes;
	dev->stats.rx_packets = rx_packets;
	dev->stats.rx_errors = rx_errors;
	dev->stats.rx_missed_errors = rx_errors;
	return &dev->stats;
}

static const struct net_device_ops bcmgenet_netdev_ops = {
	.ndo_open		= bcmgenet_open,
	.ndo_stop		= bcmgenet_close,
	.ndo_start_xmit		= bcmgenet_xmit,
	.ndo_tx_timeout		= bcmgenet_timeout,
	.ndo_set_rx_mode	= bcmgenet_set_rx_mode,
	.ndo_set_mac_address	= bcmgenet_set_mac_addr,
	.ndo_do_ioctl		= phy_do_ioctl_running,
	.ndo_set_features	= bcmgenet_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= bcmgenet_poll_controller,
#endif
	.ndo_get_stats		= bcmgenet_get_stats,
};

/* Array of GENET hardware parameters/characteristics */
static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
	[GENET_V1] = {
		.tx_queues = 0,
		.tx_bds_per_q = 0,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.hfb_offset = 0x1000,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x3000,
		.words_per_bd = 2,
	},
	[GENET_V2] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 16,
		.bp_in_mask = 0xffff,
		.hfb_filter_cnt = 16,
		.qtag_mask = 0x1F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x1000,
		.hfb_reg_offset = 0x2000,
		.rdma_offset = 0x3000,
		.tdma_offset = 0x4000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT,
	},
	[GENET_V3] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x10000,
		.tdma_offset = 0x11000,
		.words_per_bd = 2,
		.flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR |
			 GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V4] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
	[GENET_V5] = {
		.tx_queues = 4,
		.tx_bds_per_q = 32,
		.rx_queues = 0,
		.rx_bds_per_q = 0,
		.bp_in_en_shift = 17,
		.bp_in_mask = 0x1ffff,
		.hfb_filter_cnt = 48,
		.hfb_filter_size = 128,
		.qtag_mask = 0x3F,
		.tbuf_offset = 0x0600,
		.hfb_offset = 0x8000,
		.hfb_reg_offset = 0xfc00,
		.rdma_offset = 0x2000,
		.tdma_offset = 0x4000,
		.words_per_bd = 3,
		.flags = GENET_HAS_40BITS | GENET_HAS_EXT |
			 GENET_HAS_MDIO_INTR | GENET_HAS_MOCA_LINK_DET,
	},
};

/* Infer hardware parameters from the detected GENET version */
static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
{
	struct bcmgenet_hw_params *params;
	u32 reg;
	u8 major;
	u16 gphy_rev;

	if (GENET_IS_V5(priv) || GENET_IS_V4(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v4;
	} else if (GENET_IS_V3(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	} else if (GENET_IS_V2(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	} else if (GENET_IS_V1(priv)) {
		bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
		genet_dma_ring_regs = genet_dma_ring_regs_v123;
	}

	/* enum genet_version starts at 1 */
	priv->hw_params = &bcmgenet_hw_params[priv->version];
	params = priv->hw_params;

	/* Read GENET HW version */
	reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
	major = (reg >> 24 & 0x0f);
	if (major == 6)
		major = 5;
	else if (major == 5)
		major = 4;
	else if (major == 0)
		major = 1;
	if (major != priv->version) {
		dev_err(&priv->pdev->dev,
			"GENET version mismatch, got: %d, configured for: %d\n",
			major, priv->version);
	}

	/* Print the GENET core version */
	dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
		 major, (reg >> 16) & 0x0f, reg & 0xffff);

	/* Store the integrated PHY revision for the MDIO probing function
	 * to pass this information to the PHY driver. The PHY driver expects
	 * to find the PHY major revision in bits 15:8 while the GENET register
	 * stores that information in bits 7:0, so account for that.
	 *
	 * On newer chips, starting with PHY revision G0, a new scheme is
	 * deployed similar to the Starfighter 2 switch with GPHY major
	 * revision in bits 15:8 and patch level in bits 7:0. Major revision 0
	 * is reserved, as is the special value 0x01ff, so we use a small
	 * heuristic to check for the new GPHY revision and re-arrange things
	 * so the GPHY driver is happy.
	 */
	gphy_rev = reg & 0xffff;

	if (GENET_IS_V5(priv)) {
		/* The EPHY revision should come from the MDIO registers of
		 * the PHY not from GENET.
		 */
		if (gphy_rev != 0) {
			pr_warn("GENET is reporting EPHY revision: 0x%04x\n",
				gphy_rev);
		}
	/* This is reserved so should require special treatment */
	} else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
		pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
		return;
	/* This is the good old scheme, just GPHY major, no minor nor patch */
	} else if ((gphy_rev & 0xf0) != 0) {
		priv->gphy_rev = gphy_rev << 8;
	/* This is the new scheme, GPHY major rolls over with 0x10 = rev G0 */
	} else if ((gphy_rev & 0xff00) != 0) {
		priv->gphy_rev = gphy_rev;
	}

#ifdef CONFIG_PHYS_ADDR_T_64BIT
	if (!(params->flags & GENET_HAS_40BITS))
		pr_warn("GENET does not support 40-bits PA\n");
#endif

	pr_debug("Configuration for version: %d\n"
		 "TXq: %1d, TXqBDs: %1d, RXq: %1d, RXqBDs: %1d\n"
		 "BP << en: %2d, BP msk: 0x%05x\n"
		 "HFB count: %2d, QTAQ msk: 0x%05x\n"
		 "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
		 "RDMA: 0x%05x, TDMA: 0x%05x\n"
		 "Words/BD: %d\n",
		 priv->version,
		 params->tx_queues, params->tx_bds_per_q,
		 params->rx_queues, params->rx_bds_per_q,
		 params->bp_in_en_shift, params->bp_in_mask,
		 params->hfb_filter_cnt, params->qtag_mask,
		 params->tbuf_offset, params->hfb_offset,
		 params->hfb_reg_offset,
		 params->rdma_offset, params->tdma_offset,
		 params->words_per_bd);
}

struct bcmgenet_plat_data {
	enum bcmgenet_version version;
	u32 dma_max_burst_length;
};

static const struct bcmgenet_plat_data v1_plat_data = {
	.version = GENET_V1,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v2_plat_data = {
	.version = GENET_V2,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v3_plat_data = {
	.version = GENET_V3,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v4_plat_data = {
	.version = GENET_V4,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

static const struct bcmgenet_plat_data v5_plat_data = {
	.version = GENET_V5,
	.dma_max_burst_length = DMA_MAX_BURST_LENGTH,
};

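/* The GENET v5 instance in the BCM2711 (Raspberry Pi 4) requires a
 * reduced DMA burst length.
 */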
static const struct bcmgenet_plat_data bcm2711_plat_data = {
	.version = GENET_V5,
	.dma_max_burst_length = 0x08,
};

static const struct of_device_id bcmgenet_match[] = {
	{ .compatible = "brcm,genet-v1", .data = &v1_plat_data },
	{ .compatible = "brcm,genet-v2", .data = &v2_plat_data },
	{ .compatible = "brcm,genet-v3", .data = &v3_plat_data },
	{ .compatible = "brcm,genet-v4", .data = &v4_plat_data },
	{ .compatible = "brcm,genet-v5", .data = &v5_plat_data },
	{ .compatible = "brcm,bcm2711-genet-v5", .data = &bcm2711_plat_data },
	{ },
};
MODULE_DEVICE_TABLE(of, bcmgenet_match);

static int bcmgenet_probe(struct platform_device *pdev)
{
	struct bcmgenet_platform_data *pd = pdev->dev.platform_data;
	struct device_node *dn = pdev->dev.of_node;
	const struct of_device_id *of_id = NULL;
	const struct bcmgenet_plat_data *pdata;
	struct bcmgenet_priv *priv;
	struct net_device *dev;
	const void *macaddr;
	unsigned int i;
	int err = -EIO;
	const char *phy_mode_str;

	/* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
	dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,
				 GENET_MAX_MQ_CNT + 1);
	if (!dev) {
		dev_err(&pdev->dev, "can't allocate net device\n");
		return -ENOMEM;
	}

	if (dn) {
		of_id = of_match_node(bcmgenet_match, dn);
		if (!of_id)
			return -EINVAL;
	}

	priv = netdev_priv(dev);
	priv->irq0 = platform_get_irq(pdev, 0);
	if (priv->irq0 < 0) {
		err = priv->irq0;
		goto err;
	}
	priv->irq1 = platform_get_irq(pdev, 1);
	if (priv->irq1 < 0) {
		err = priv->irq1;
		goto err;
	}
	priv->wol_irq = platform_get_irq_optional(pdev, 2);

	if (dn)
		macaddr = of_get_mac_address(dn);
	else
		macaddr = pd->mac_address;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base)) {
		err = PTR_ERR(priv->base);
		goto err;
	}

	spin_lock_init(&priv->lock);

	SET_NETDEV_DEV(dev, &pdev->dev);
	dev_set_drvdata(&pdev->dev, dev);
	if (IS_ERR_OR_NULL(macaddr) || !is_valid_ether_addr(macaddr)) {
		dev_warn(&pdev->dev, "using random Ethernet MAC\n");
		eth_hw_addr_random(dev);
	} else {
		ether_addr_copy(dev->dev_addr, macaddr);
	}
	dev->watchdog_timeo = 2 * HZ;
	dev->ethtool_ops = &bcmgenet_ethtool_ops;
	dev->netdev_ops = &bcmgenet_netdev_ops;

	priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);

	/* Set default features */
	dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
			 NETIF_F_RXCSUM;
	dev->hw_features |= dev->features;
	dev->vlan_features |= dev->features;

	/* Request the WOL interrupt and advertise suspend if available */
	priv->wol_irq_disabled = true;
	err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
			       dev->name, priv);
	if (!err)
		device_set_wakeup_capable(&pdev->dev, 1);

	/* Set the needed headroom to account for any possible
	 * features enabling/disabling at runtime
	 */
	dev->needed_headroom += 64;

	netdev_boot_setup_check(dev);

	priv->dev = dev;
	priv->pdev = pdev;
	if (of_id) {
		pdata = of_id->data;
		priv->version = pdata->version;
		priv->dma_max_burst_length = pdata->dma_max_burst_length;
	} else {
		priv->version = pd->genet_version;
		priv->dma_max_burst_length = DMA_MAX_BURST_LENGTH;
	}

	priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
	if (IS_ERR(priv->clk)) {
		dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
		priv->clk = NULL;
	}

	clk_prepare_enable(priv->clk);

	bcmgenet_set_hw_params(priv);

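	/* Prefer a 40-bit DMA mask where the hardware supports it, falling
	 * back to a 32-bit mask otherwise.
	 */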
	err = -EIO;
	if (priv->hw_params->flags & GENET_HAS_40BITS)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40));
	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (err)
		goto err;

	/* Mii wait queue */
	init_waitqueue_head(&priv->wq);
	/* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
	priv->rx_buf_len = RX_BUF_LENGTH;
	INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);

	priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
	if (IS_ERR(priv->clk_wol)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
		priv->clk_wol = NULL;
	}

	priv->clk_eee = devm_clk_get(&priv->pdev->dev, "enet-eee");
	if (IS_ERR(priv->clk_eee)) {
		dev_warn(&priv->pdev->dev, "failed to get enet-eee clock\n");
		priv->clk_eee = NULL;
	}

	/* If this is an internal GPHY, power it on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
	    !strcasecmp(phy_mode_str, "internal"))
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	reset_umac(priv);

	err = bcmgenet_mii_init(dev);
	if (err)
		goto err_clk_disable;

	/* setup number of real queues + 1 (GENET_V1 has 0 hardware queues,
	 * just the ring 16 descriptor-based TX queue)
	 */
	netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
	netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);

	/* Set default coalescing parameters */
	for (i = 0; i < priv->hw_params->rx_queues; i++)
		priv->rx_rings[i].rx_max_coalesced_frames = 1;
	priv->rx_rings[DESC_INDEX].rx_max_coalesced_frames = 1;

	/* libphy will determine the link state */
	netif_carrier_off(dev);

	/* Turn off the main clock, WOL clock is handled separately */
	clk_disable_unprepare(priv->clk);

	err = register_netdev(dev);
	if (err)
		goto err;

	return err;

err_clk_disable:
	clk_disable_unprepare(priv->clk);
err:
	free_netdev(dev);
	return err;
}

static int bcmgenet_remove(struct platform_device *pdev)
{
	struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);

	dev_set_drvdata(&pdev->dev, NULL);
	unregister_netdev(priv->dev);
	bcmgenet_mii_exit(priv->dev);
	free_netdev(priv->dev);

	return 0;
}

static void bcmgenet_shutdown(struct platform_device *pdev)
{
	bcmgenet_remove(pdev);
}

#ifdef CONFIG_PM_SLEEP
static int bcmgenet_resume(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	unsigned long dma_ctrl;
	int ret;
	u32 reg;

	if (!netif_running(dev))
		return 0;

	/* Turn on the clock */
	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	/* If this is an internal GPHY, power it back on now, before UniMAC is
	 * brought out of reset as absolutely no UniMAC activity is allowed
	 */
	if (priv->internal_phy)
		bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

	bcmgenet_umac_reset(priv);

	init_umac(priv);

	/* From WOL-enabled suspend, switch to regular clock */
	if (priv->wolopts)
		clk_disable_unprepare(priv->clk_wol);

	phy_init_hw(dev->phydev);

	/* Speed settings must be restored */
	genphy_config_aneg(dev->phydev);
	bcmgenet_mii_config(priv->dev, false);

	/* Restore enabled features */
	bcmgenet_set_features(dev, dev->features);

	bcmgenet_set_hw_addr(priv, dev->dev_addr);

	if (priv->internal_phy) {
		reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
		reg |= EXT_ENERGY_DET_MASK;
		bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
	}

	if (priv->wolopts)
		bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);

	/* Disable RX/TX DMA and flush TX queues */
	dma_ctrl = bcmgenet_dma_disable(priv);

	/* Reinitialize TDMA and RDMA and SW housekeeping */
	ret = bcmgenet_init_dma(priv);
	if (ret) {
		netdev_err(dev, "failed to initialize DMA\n");
		goto out_clk_disable;
	}

	/* Always enable ring 16 - descriptor ring */
	bcmgenet_enable_dma(priv, dma_ctrl);

	if (!device_may_wakeup(d))
		phy_resume(dev->phydev);

	if (priv->eee.eee_enabled)
		bcmgenet_eee_enable_set(dev, true);

	bcmgenet_netif_start(dev);

	netif_device_attach(dev);

	return 0;

out_clk_disable:
	if (priv->internal_phy)
		bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int bcmgenet_suspend(struct device *d)
{
	struct net_device *dev = dev_get_drvdata(d);
	struct bcmgenet_priv *priv = netdev_priv(dev);
	int ret = 0;

	if (!netif_running(dev))
		return 0;

	netif_device_detach(dev);

	bcmgenet_netif_stop(dev);

	if (!device_may_wakeup(d))
		phy_suspend(dev->phydev);

	/* Prepare the device for Wake-on-LAN and switch to the slow clock */
	if (device_may_wakeup(d) && priv->wolopts) {
		ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
		clk_prepare_enable(priv->clk_wol);
	} else if (priv->internal_phy) {
		ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
	}

	/* Turn off the clocks */
	clk_disable_unprepare(priv->clk);

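	/* Roll back via resume if the power-down step failed */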
	if (ret)
		bcmgenet_resume(d);

	return ret;
}
#endif /* CONFIG_PM_SLEEP */

static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);

static struct platform_driver bcmgenet_driver = {
	.probe	= bcmgenet_probe,
	.remove	= bcmgenet_remove,
	.shutdown = bcmgenet_shutdown,
	.driver	= {
		.name	= "bcmgenet",
		.of_match_table = bcmgenet_match,
		.pm	= &bcmgenet_pm_ops,
	},
};
module_platform_driver(bcmgenet_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
MODULE_ALIAS("platform:bcmgenet");
MODULE_LICENSE("GPL");