// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/crc32.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/circ_buf.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/dma-mapping.h>
#include <linux/platform_data/macb.h>
#include <linux/platform_device.h>
#include <linux/phylink.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/iopoll.h>
#include <linux/pm_runtime.h>
#include "macb.h"

/* This structure is only used for MACB on SiFive FU540 devices */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;
	unsigned long rate;
	struct clk_hw hw;
};

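/*
 * Illustrative note (not in the original source): embedding struct clk_hw
 * lets the common clock framework pass its callbacks a pointer to "hw",
 * from which the wrapper is recovered with container_of(), e.g.:
 *
 *	struct sifive_fu540_macb_mgmt *mgmt =
 *		container_of(hw, struct sifive_fu540_macb_mgmt, hw);
 */
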
#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64	/* bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192
#define RX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->rx_ring_size)

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096
#define TX_RING_BYTES(bp)	(macb_dma_desc_get_size(bp)	\
				 * (bp)->tx_ring_size)

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)

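/*
 * Worked example (illustrative, not in the original source): with the
 * default tx_ring_size of 512, MACB_TX_WAKEUP_THRESH evaluates to
 * 3 * 512 / 4 = 384, i.e. a stopped TX queue is woken once at most 384
 * descriptors are still in flight.
 */
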
#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)	\
				 | MACB_BIT(ISR_RLE)	\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
				 | MACB_BIT(TXUBR))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
/* Limit the maximum TX length as per the Cadence TSO errata. This avoids
 * a false amba_error in the TX path, raised when the DMA assumes there is
 * not enough space in the SRAM (16 KB) even when there is.
 */
#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
#define MACB_WOL_ENABLED		(0x1 << 1)

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	1230

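/*
 * Sanity check of the value above (illustrative, not in the original
 * source): a maximum-size frame occupies 1538 byte times on the wire
 * (1518-byte frame plus preamble and inter-frame gap), so at 10 Mbit/s
 * it takes 1538 * 8 / 10e6 s = 1230.4 us - hence the 1230 us timeout.
 */
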
#define MACB_PM_TIMEOUT	100 /* ms */

#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */

/* The DMA buffer descriptor size can differ depending on the hardware
 * configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
#ifdef MACB_EXT_DESC
	unsigned int desc_size;

	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64);
		break;
	case HW_DMA_CAP_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_size = sizeof(struct macb_dma_desc)
			+ sizeof(struct macb_dma_desc_64)
			+ sizeof(struct macb_dma_desc_ptp);
		break;
	default:
		desc_size = sizeof(struct macb_dma_desc);
	}
	return desc_size;
#endif
	return sizeof(struct macb_dma_desc);
}

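/*
 * Illustrative sizes, derived from the word layout documented above (one
 * word = 4 bytes): a plain descriptor is 2 words (8 bytes), the 64-bit
 * and PTP variants are 4 words (16 bytes) each, and the combined
 * 64-bit + PTP variant is 6 words (24 bytes).
 */
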
static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
#ifdef MACB_EXT_DESC
	switch (bp->hw_dma_cap) {
	case HW_DMA_CAP_64B:
	case HW_DMA_CAP_PTP:
		desc_idx <<= 1;
		break;
	case HW_DMA_CAP_64B_PTP:
		desc_idx *= 3;
		break;
	default:
		break;
	}
#endif
	return desc_idx;
}

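/*
 * Illustrative note (not in the original source): the rings are indexed
 * as arrays of plain 2-word descriptors, so a logical index is scaled by
 * the extended descriptor size in those units: 4-word descriptors
 * (64-bit or PTP) span two slots (desc_idx << 1) and 6-word descriptors
 * (64-bit + PTP) span three (desc_idx * 3); logical index 5 thus maps to
 * slot 10 or 15 respectively.
 */
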
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	return (struct macb_dma_desc_64 *)((void *)desc
		+ sizeof(struct macb_dma_desc));
}
#endif

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
			macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of the NCR register.
 * When the CPU is big-endian we need to program swapped mode for
 * management descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

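/*
 * Illustrative note (not in the original source): the probe above writes
 * the loopback bit with a native-endian __raw_writel() and reads it back
 * the same way. If the value survives the round trip, CPU and controller
 * register byte order agree and the native accessors can be used;
 * otherwise the byte-swapping readl/writel_relaxed() pair is selected.
 */
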
static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
	macb_or_gem_writel(bp, SA1B, bottom);
	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
	macb_or_gem_writel(bp, SA1T, top);

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

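/*
 * Worked example (illustrative, not in the original source): for the MAC
 * address 00:11:22:33:44:55, dev_addr[] holds { 0x00, 0x11, 0x22, 0x33,
 * 0x44, 0x55 }, so the little-endian packing above programs
 * SA1B = 0x33221100 and SA1T = 0x5544.
 */
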
static void macb_get_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		addr[0] = bottom & 0xff;
		addr[1] = (bottom >> 8) & 0xff;
		addr[2] = (bottom >> 16) & 0xff;
		addr[3] = (bottom >> 24) & 0xff;
		addr[4] = top & 0xff;
		addr[5] = (top >> 8) & 0xff;

		if (is_valid_ether_addr(addr)) {
			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_wait_for_idle(struct macb *bp)
{
	u32 val;

	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
				  1, MACB_MDIO_TIMEOUT);
}

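/*
 * Illustrative note (not in the original source):
 * readx_poll_timeout(op, args, val, cond, sleep_us, timeout_us)
 * repeatedly evaluates op(args) into val until cond holds, sleeping
 * about 1 us between reads here and returning -ETIMEDOUT once
 * MACB_MDIO_TIMEOUT (1 s) has elapsed without the IDLE bit being set.
 */
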
static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	if (regnum & MII_ADDR_C45) {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_ADDR)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(DATA, regnum & 0xFFFF)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_read_exit;

		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_READ)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));
	} else {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
				| MACB_BF(RW, MACB_MAN_C22_READ)
				| MACB_BF(PHYA, mii_id)
				| MACB_BF(REGA, regnum)
				| MACB_BF(CODE, MACB_MAN_C22_CODE)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
			   u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	if (regnum & MII_ADDR_C45) {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_ADDR)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(DATA, regnum & 0xFFFF)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)));

		status = macb_mdio_wait_for_idle(bp);
		if (status < 0)
			goto mdio_write_exit;

		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			    | MACB_BF(RW, MACB_MAN_C45_WRITE)
			    | MACB_BF(PHYA, mii_id)
			    | MACB_BF(REGA, (regnum >> 16) & 0x1F)
			    | MACB_BF(CODE, MACB_MAN_C45_CODE)
			    | MACB_BF(DATA, value)));
	} else {
		macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
				| MACB_BF(RW, MACB_MAN_C22_WRITE)
				| MACB_BF(PHYA, mii_id)
				| MACB_BF(REGA, regnum)
				| MACB_BF(CODE, MACB_MAN_C22_CODE)
				| MACB_BF(DATA, value)));
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_mark_last_busy(&bp->pdev->dev);
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

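/*
 * Illustrative note on the clause-45 paths above (not in the original
 * source): for C45 PHYs, regnum encodes the MMD device type in bits
 * 16-20 and the 16-bit register address in the low bits, so both
 * accessors first issue an ADDR frame carrying that register address
 * and then the actual READ or WRITE frame, waiting for the MDIO block
 * to go idle between the two.
 */
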
static void macb_init_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, RBQPH,
				     upper_32_bits(queue->rx_ring_dma));
#endif
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
			queue_writel(queue, TBQPH,
				     upper_32_bits(queue->tx_ring_dma));
#endif
	}
}

/**
 * macb_set_tx_clk() - Set a clock to a new frequency
 * @clk: Pointer to the clock to change
 * @speed: Link speed (SPEED_10/100/1000) to derive the clock rate from
 * @dev: Pointer to the struct net_device
 */
static void macb_set_tx_clk(struct clk *clk, int speed, struct net_device *dev)
{
	long ferr, rate, rate_rounded;

	if (!clk)
		return;

	switch (speed) {
	case SPEED_10:
		rate = 2500000;
		break;
	case SPEED_100:
		rate = 25000000;
		break;
	case SPEED_1000:
		rate = 125000000;
		break;
	default:
		return;
	}

	rate_rounded = clk_round_rate(clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
	if (ferr > 5)
		netdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(clk, rate_rounded))
		netdev_err(dev, "adjusting tx_clk failed.\n");
}

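/*
 * Worked example (illustrative, not in the original source): ferr above
 * is computed in units of rate / 100000, i.e. 10 ppm, so the warning
 * fires when the rounded rate deviates from the target by more than
 * 50 ppm. For SPEED_1000 (125 MHz) that is any deviation beyond 6250 Hz.
 */
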
static void macb_validate(struct phylink_config *config,
			  unsigned long *supported,
			  struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct macb *bp = netdev_priv(ndev);

	/* We only support MII, RMII, GMII, RGMII & SGMII. */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    state->interface != PHY_INTERFACE_MODE_MII &&
	    state->interface != PHY_INTERFACE_MODE_RMII &&
	    state->interface != PHY_INTERFACE_MODE_GMII &&
	    state->interface != PHY_INTERFACE_MODE_SGMII &&
	    !phy_interface_mode_is_rgmii(state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	if (!macb_is_gem(bp) &&
	    (state->interface == PHY_INTERFACE_MODE_GMII ||
	     phy_interface_mode_is_rgmii(state->interface))) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	phylink_set_port_modes(mask);
	phylink_set(mask, Autoneg);
	phylink_set(mask, Asym_Pause);

	phylink_set(mask, 10baseT_Half);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Half);
	phylink_set(mask, 100baseT_Full);

	if (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE &&
	    (state->interface == PHY_INTERFACE_MODE_NA ||
	     state->interface == PHY_INTERFACE_MODE_GMII ||
	     state->interface == PHY_INTERFACE_MODE_SGMII ||
	     phy_interface_mode_is_rgmii(state->interface))) {
		phylink_set(mask, 1000baseT_Full);
		phylink_set(mask, 1000baseX_Full);

		if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
			phylink_set(mask, 1000baseT_Half);
	}

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}

static void macb_mac_pcs_get_state(struct phylink_config *config,
				   struct phylink_link_state *state)
{
	state->link = 0;
}

static void macb_mac_an_restart(struct phylink_config *config)
{
	/* Not supported */
}

static void macb_mac_config(struct phylink_config *config, unsigned int mode,
			    const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	unsigned long flags;
	u32 old_ctrl, ctrl;

	spin_lock_irqsave(&bp->lock, flags);

	old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);

	if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
		if (state->interface == PHY_INTERFACE_MODE_RMII)
			ctrl |= MACB_BIT(RM9200_RMII);
	} else if (macb_is_gem(bp)) {
		ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));

		if (state->interface == PHY_INTERFACE_MODE_SGMII)
			ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
	}

	/* Apply the new configuration, if any */
	if (old_ctrl ^ ctrl)
		macb_or_gem_writel(bp, NCFGR, ctrl);

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
			       phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl;

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IDR,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));

	/* Disable Rx and Tx */
	ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(ndev);
}

static void macb_mac_link_up(struct phylink_config *config,
			     struct phy_device *phy,
			     unsigned int mode, phy_interface_t interface,
			     int speed, int duplex,
			     bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	u32 ctrl;

	spin_lock_irqsave(&bp->lock, flags);

	ctrl = macb_or_gem_readl(bp, NCFGR);

	ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));

	if (speed == SPEED_100)
		ctrl |= MACB_BIT(SPD);

	if (duplex)
		ctrl |= MACB_BIT(FD);

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
		ctrl &= ~MACB_BIT(PAE);
		if (macb_is_gem(bp)) {
			ctrl &= ~GEM_BIT(GBE);

			if (speed == SPEED_1000)
				ctrl |= GEM_BIT(GBE);
		}

		if (rx_pause)
			ctrl |= MACB_BIT(PAE);

		macb_set_tx_clk(bp->tx_clk, speed, ndev);

		/* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
		 * cleared the pipeline and control registers.
		 */
		bp->macbgem_ops.mog_init_rings(bp);
		macb_init_buffers(bp);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IER,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
	}

	macb_or_gem_writel(bp, NCFGR, ctrl);

	spin_unlock_irqrestore(&bp->lock, flags);

	/* Enable Rx and Tx */
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));

	netif_tx_wake_all_queues(ndev);
}

static const struct phylink_mac_ops macb_phylink_ops = {
	.validate = macb_validate,
	.mac_pcs_get_state = macb_mac_pcs_get_state,
	.mac_an_restart = macb_mac_an_restart,
	.mac_config = macb_mac_config,
	.mac_link_down = macb_mac_link_down,
	.mac_link_up = macb_mac_link_up,
};

static bool macb_phy_handle_exists(struct device_node *dn)
{
	dn = of_parse_phandle(dn, "phy-handle", 0);
	of_node_put(dn);
	return dn != NULL;
}

static int macb_phylink_connect(struct macb *bp)
{
	struct device_node *dn = bp->pdev->dev.of_node;
	struct net_device *dev = bp->dev;
	struct phy_device *phydev;
	int ret;

	if (dn)
		ret = phylink_of_phy_connect(bp->phylink, dn, 0);

	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		/* attach the mac to the phy */
		ret = phylink_connect_phy(bp->phylink, phydev);
	}

	if (ret) {
		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
		return ret;
	}

	phylink_start(bp->phylink);

	return 0;
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);

	bp->phylink_config.dev = &dev->dev;
	bp->phylink_config.type = PHYLINK_NETDEV;

	bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
				     bp->phy_interface, &macb_phylink_ops);
	if (IS_ERR(bp->phylink)) {
		netdev_err(dev, "Could not create a phylink instance (%ld)\n",
			   PTR_ERR(bp->phylink));
		return PTR_ERR(bp->phylink);
	}

	return 0;
}

static int macb_mdiobus_register(struct macb *bp)
{
	struct device_node *child, *np = bp->pdev->dev.of_node;

	if (of_phy_is_fixed_link(np))
		return mdiobus_register(bp->mii_bus);

	/* Only create the PHY from the device tree if at least one PHY is
	 * described. Otherwise scan the entire MDIO bus. We do this to support
	 * old device trees that did not follow the best practices and did not
	 * describe their network PHYs.
	 */
	for_each_available_child_of_node(np, child)
		if (of_mdiobus_child_is_phy(child)) {
			/* The loop increments the child refcount,
			 * decrement it before returning.
			 */
			of_node_put(child);

			return of_mdiobus_register(bp->mii_bus, np);
		}

	return mdiobus_register(bp->mii_bus);
}

static int macb_mii_init(struct macb *bp)
{
	int err = -ENXIO;

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read;
	bp->mii_bus->write = &macb_mdio_write;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	err = macb_mdiobus_register(bp);
	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}

static int macb_halt_tx(struct macb *bp)
{
	unsigned long halt_time, timeout;
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
	do {
		halt_time = jiffies;
		status = macb_readl(bp, TSR);
		if (!(status & MACB_BIT(TGO)))
			return 0;

		udelay(250);
	} while (time_before(halt_time, timeout));

	return -ETIMEDOUT;
}

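/*
 * Illustrative timing (not in the original source): with
 * MACB_HALT_TIMEOUT = 1230 us and a 250 us delay between TSR reads, the
 * loop above samples the TGO (transmit-go) bit roughly five times before
 * reporting -ETIMEDOUT.
 */
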
static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_kfree_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}
#endif
	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	struct macb_dma_desc_64 *desc_64;

	if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
#endif
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
	return addr;
}

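/*
 * Worked example (illustrative, not in the original source): on a 64-bit
 * capable controller, addrh = 0x00000001 and addr = 0x80000000 in the
 * descriptor reassemble to the DMA address 0x180000000; the RX_WADDR
 * field extraction masks off the low bits of addr, which double as the
 * RX_USED and RX_WRAP status flags.
 */
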
static void macb_tx_error_task(struct work_struct *work)
{
	struct macb_queue *queue = container_of(work, struct macb_queue,
						tx_error_task);
	struct macb *bp = queue->bp;
	struct macb_tx_skb *tx_skb;
	struct macb_dma_desc *desc;
	struct sk_buff *skb;
	unsigned int tail;
	unsigned long flags;

	netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
		    (unsigned int)(queue - bp->queues),
		    queue->tx_tail, queue->tx_head);

	/* Prevent the queue IRQ handlers from running: each of them may call
	 * macb_tx_interrupt(), which in turn may call netif_wake_subqueue().
	 * As explained below, we have to halt the transmission before updating
	 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
	 * network engine about the macb/gem being halted.
	 */
	spin_lock_irqsave(&bp->lock, flags);

	/* Make sure nobody is trying to queue up new packets */
	netif_tx_stop_all_queues(bp->dev);

	/* Stop transmission now
	 * (in case we have just queued new packets)
	 * macb/gem must be halted to write TBQP register
	 */
	if (macb_halt_tx(bp))
		/* Just complain for now, reinitializing TX path can be good */
		netdev_err(bp->dev, "BUG: halt tx timed out\n");

	/* Treat frames in TX queue including the ones that caused the error.
	 * Free transmit buffers in upper layer.
	 */
	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);
		ctrl = desc->ctrl;
		tx_skb = macb_tx_skb(queue, tail);
		skb = tx_skb->skb;

		if (ctrl & MACB_BIT(TX_USED)) {
			/* skb is set for the last buffer of the frame */
			while (!skb) {
				macb_tx_unmap(bp, tx_skb);
				tail++;
				tx_skb = macb_tx_skb(queue, tail);
				skb = tx_skb->skb;
			}

			/* ctrl still refers to the first buffer descriptor
			 * since it's the only one written back by the hardware
			 */
			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}
		} else {
			/* "Buffers exhausted mid-frame" errors may only happen
			 * if the driver is buggy, so complain loudly about
			 * those. Statistics are updated by hardware.
			 */
			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
				netdev_err(bp->dev,
					   "BUG: TX buffers exhausted mid-frame\n");

			desc->ctrl = ctrl | MACB_BIT(TX_USED);
		}

		macb_tx_unmap(bp, tx_skb);
	}

	/* Set end of TX queue */
	desc = macb_tx_desc(queue, 0);
	macb_set_addr(bp, desc, 0);
	desc->ctrl = MACB_BIT(TX_USED);

	/* Make descriptor updates visible to hardware */
	wmb();

	/* Reinitialize the TX desc queue */
	queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (bp->hw_dma_cap & HW_DMA_CAP_64B)
		queue_writel(queue, TBQPH, upper_32_bits(queue->tx_ring_dma));
#endif
	/* Make TX ring reflect state of hardware */
	queue->tx_head = 0;
	queue->tx_tail = 0;

	/* Housework before enabling TX IRQ */
	macb_writel(bp, TSR, macb_readl(bp, TSR));
	queue_writel(queue, IER, MACB_TX_INT_FLAGS);

	/* Now we are ready to start transmission again */
	netif_tx_start_all_queues(bp->dev);
	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));

	spin_unlock_irqrestore(&bp->lock, flags);
}

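/*
 * Illustrative summary of the recovery sequence above (not in the
 * original source): halt the transmitter, reap or invalidate every
 * queued descriptor, terminate the ring with a single TX_USED
 * descriptor, rewrite TBQP/TBQPH, reset head and tail, acknowledge TSR,
 * re-enable the TX interrupts and finally restart transmission with
 * TSTART.
 */
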
static void macb_tx_interrupt(struct macb_queue *queue)
{
	unsigned int tail;
	unsigned int head;
	u32 status;
	struct macb *bp = queue->bp;
	u16 queue_index = queue - bp->queues;

	status = macb_readl(bp, TSR);
	macb_writel(bp, TSR, status);

	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
		queue_writel(queue, ISR, MACB_BIT(TCOMP));

	netdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
		    (unsigned long)status);

	head = queue->tx_head;
	for (tail = queue->tx_tail; tail != head; tail++) {
		struct macb_tx_skb *tx_skb;
		struct sk_buff *skb;
		struct macb_dma_desc *desc;
		u32 ctrl;

		desc = macb_tx_desc(queue, tail);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		ctrl = desc->ctrl;

		/* TX_USED bit is only set by hardware on the very first buffer
		 * descriptor of the transmitted frame.
		 */
		if (!(ctrl & MACB_BIT(TX_USED)))
			break;

		/* Process all buffers of the current transmitted frame */
		for (;; tail++) {
			tx_skb = macb_tx_skb(queue, tail);
			skb = tx_skb->skb;

			/* First, update TX stats if needed */
			if (skb) {
				if (unlikely(skb_shinfo(skb)->tx_flags &
					     SKBTX_HW_TSTAMP) &&
				    gem_ptp_do_txstamp(queue, skb, desc) == 0) {
					/* skb now belongs to timestamp buffer
					 * and will be removed later
					 */
					tx_skb->skb = NULL;
				}
				netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
					    macb_tx_ring_wrap(bp, tail),
					    skb->data);
				bp->dev->stats.tx_packets++;
				queue->stats.tx_packets++;
				bp->dev->stats.tx_bytes += skb->len;
				queue->stats.tx_bytes += skb->len;
			}

			/* Now we can safely release resources */
			macb_tx_unmap(bp, tx_skb);

			/* skb is set only for the last buffer of the frame.
			 * WARNING: at this point skb has been freed by
			 * macb_tx_unmap().
			 */
			if (skb)
				break;
		}
	}

	queue->tx_tail = tail;
	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
	    CIRC_CNT(queue->tx_head, queue->tx_tail,
		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
		netif_wake_subqueue(bp->dev, queue_index);
}

static void gem_rx_refill(struct macb_queue *queue)
{
	unsigned int entry;
	struct sk_buff *skb;
	dma_addr_t paddr;
	struct macb *bp = queue->bp;
	struct macb_dma_desc *desc;

	while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
			  bp->rx_ring_size) > 0) {
		entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

		/* Make hw descriptor updates visible to CPU */
		rmb();

		queue->rx_prepared_head++;
		desc = macb_rx_desc(queue, entry);

		if (!queue->rx_skbuff[entry]) {
			/* allocate sk_buff for this free entry in ring */
			skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
			if (unlikely(!skb)) {
				netdev_err(bp->dev,
					   "Unable to allocate sk_buff\n");
				break;
			}

			/* now fill corresponding descriptor entry */
			paddr = dma_map_single(&bp->pdev->dev, skb->data,
					       bp->rx_buffer_size,
					       DMA_FROM_DEVICE);
			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
				dev_kfree_skb(skb);
				break;
			}

			queue->rx_skbuff[entry] = skb;

			if (entry == bp->rx_ring_size - 1)
				paddr |= MACB_BIT(RX_WRAP);
			desc->ctrl = 0;
			/* Setting addr clears RX_USED and allows reception,
			 * make sure ctrl is cleared first to avoid a race.
			 */
			dma_wmb();
			macb_set_addr(bp, desc, paddr);

			/* properly align Ethernet header */
			skb_reserve(skb, NET_IP_ALIGN);
		} else {
			desc->ctrl = 0;
			dma_wmb();
			desc->addr &= ~MACB_BIT(RX_USED);
		}
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
		    queue, queue->rx_prepared_head, queue->rx_tail);
}

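/*
 * Illustrative note (not in the original source): descriptor ownership
 * on the RX side is signalled by the RX_USED bit in the address word.
 * The driver clears it to hand a buffer to the hardware, and the
 * hardware sets it (after filling ctrl) once a packet has been written,
 * which is why ctrl must be cleared and made visible via dma_wmb()
 * before the address word is rewritten above.
 */
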
/* Mark DMA descriptors from begin up to and not including end as unused */
static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
				  unsigned int end)
{
	unsigned int frag;

	for (frag = begin; frag != end; frag++) {
		struct macb_dma_desc *desc = macb_rx_desc(queue, frag);

		desc->addr &= ~MACB_BIT(RX_USED);
	}

	/* Make descriptor updates visible to hardware */
	wmb();

	/* When this happens, the hardware stats registers for
	 * whatever caused this are updated, so we don't have to record
	 * anything.
	 */
}

Antoine Tenart97236cd2019-06-21 17:30:02 +02001165static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
1166 int budget)
Nicolas Ferre4df95132013-06-04 21:57:12 +00001167{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001168 struct macb *bp = queue->bp;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001169 unsigned int len;
1170 unsigned int entry;
1171 struct sk_buff *skb;
1172 struct macb_dma_desc *desc;
1173 int count = 0;
1174
1175 while (count < budget) {
Harini Katakamfff80192016-08-09 13:15:53 +05301176 u32 ctrl;
1177 dma_addr_t addr;
1178 bool rxused;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001179
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001180 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1181 desc = macb_rx_desc(queue, entry);
Nicolas Ferre4df95132013-06-04 21:57:12 +00001182
1183 /* Make hw descriptor updates visible to CPU */
1184 rmb();
1185
Harini Katakamfff80192016-08-09 13:15:53 +05301186 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001187 addr = macb_get_addr(bp, desc);
Nicolas Ferre4df95132013-06-04 21:57:12 +00001188
Harini Katakamfff80192016-08-09 13:15:53 +05301189 if (!rxused)
Nicolas Ferre4df95132013-06-04 21:57:12 +00001190 break;
1191
Anssi Hannula6e0af292018-12-17 15:05:41 +02001192 /* Ensure ctrl is at least as up-to-date as rxused */
1193 dma_rmb();
1194
1195 ctrl = desc->ctrl;
1196
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001197 queue->rx_tail++;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001198 count++;
1199
1200 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
1201 netdev_err(bp->dev,
1202 "not whole frame pointed by descriptor\n");
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02001203 bp->dev->stats.rx_dropped++;
Rafal Ozieblo512286b2017-11-30 18:19:56 +00001204 queue->stats.rx_dropped++;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001205 break;
1206 }
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001207 skb = queue->rx_skbuff[entry];
Nicolas Ferre4df95132013-06-04 21:57:12 +00001208 if (unlikely(!skb)) {
1209 netdev_err(bp->dev,
1210 "inconsistent Rx descriptor chain\n");
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02001211 bp->dev->stats.rx_dropped++;
Rafal Ozieblo512286b2017-11-30 18:19:56 +00001212 queue->stats.rx_dropped++;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001213 break;
1214 }
1215 /* now everything is ready for receiving packet */
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001216 queue->rx_skbuff[entry] = NULL;
Harini Katakam98b5a0f42015-05-06 22:27:17 +05301217 len = ctrl & bp->rx_frm_len_mask;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001218
1219 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
1220
1221 skb_put(skb, len);
Nicolas Ferre4df95132013-06-04 21:57:12 +00001222 dma_unmap_single(&bp->pdev->dev, addr,
Soren Brinkmann48330e082014-03-04 08:46:40 -08001223 bp->rx_buffer_size, DMA_FROM_DEVICE);
Nicolas Ferre4df95132013-06-04 21:57:12 +00001224
1225 skb->protocol = eth_type_trans(skb, bp->dev);
1226 skb_checksum_none_assert(skb);
Cyrille Pitchen924ec532014-07-24 13:51:01 +02001227 if (bp->dev->features & NETIF_F_RXCSUM &&
1228 !(bp->dev->flags & IFF_PROMISC) &&
1229 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
1230 skb->ip_summed = CHECKSUM_UNNECESSARY;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001231
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02001232 bp->dev->stats.rx_packets++;
Rafal Ozieblo512286b2017-11-30 18:19:56 +00001233 queue->stats.rx_packets++;
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02001234 bp->dev->stats.rx_bytes += skb->len;
Rafal Ozieblo512286b2017-11-30 18:19:56 +00001235 queue->stats.rx_bytes += skb->len;
Nicolas Ferre4df95132013-06-04 21:57:12 +00001236
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01001237 gem_ptp_do_rxstamp(bp, skb, desc);
1238
Nicolas Ferre4df95132013-06-04 21:57:12 +00001239#if defined(DEBUG) && defined(VERBOSE_DEBUG)
1240 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1241 skb->len, skb->csum);
1242 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
Cyrille Pitchen51f83012014-12-11 11:15:54 +01001243 skb_mac_header(skb), 16, true);
Nicolas Ferre4df95132013-06-04 21:57:12 +00001244 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
1245 skb->data, 32, true);
1246#endif
1247
Antoine Tenart97236cd2019-06-21 17:30:02 +02001248 napi_gro_receive(napi, skb);
Nicolas Ferre4df95132013-06-04 21:57:12 +00001249 }
1250
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001251 gem_rx_refill(queue);
Nicolas Ferre4df95132013-06-04 21:57:12 +00001252
1253 return count;
1254}
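/*
 * Editor's note -- a minimal userspace model (not driver code) of the
 * "used bit" handshake gem_rx() depends on.  The controller writes
 * desc->ctrl before setting RX_USED in desc->addr, so software must
 * order its two reads the other way around; that is what the dma_rmb()
 * above enforces and what C11 acquire semantics model here.  All names
 * below are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct model_rx_desc {
	_Atomic uint32_t addr;	/* bit 0 models MACB_BIT(RX_USED) */
	uint32_t ctrl;		/* frame length, SOF/EOF, csum flags */
};

static bool model_desc_consume(struct model_rx_desc *desc, uint32_t *ctrl)
{
	/* acquire pairs with the producer's release-store of RX_USED */
	if (!(atomic_load_explicit(&desc->addr, memory_order_acquire) & 1))
		return false;		/* still owned by the hardware */
	*ctrl = desc->ctrl;		/* ordered after the used-bit read */
	return true;
}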
1255
Antoine Tenart97236cd2019-06-21 17:30:02 +02001256static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
1257 unsigned int first_frag, unsigned int last_frag)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001258{
1259 unsigned int len;
1260 unsigned int frag;
Havard Skinnemoen29bc2e12012-10-31 06:04:58 +00001261 unsigned int offset;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001262 struct sk_buff *skb;
Havard Skinnemoen55054a12012-10-31 06:04:55 +00001263 struct macb_dma_desc *desc;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001264 struct macb *bp = queue->bp;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001265
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001266 desc = macb_rx_desc(queue, last_frag);
Harini Katakam98b5a0f42015-05-06 22:27:17 +05301267 len = desc->ctrl & bp->rx_frm_len_mask;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001268
Havard Skinnemoena268adb2012-10-31 06:04:52 +00001269 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
Zach Brownb410d132016-10-19 09:56:57 -05001270 macb_rx_ring_wrap(bp, first_frag),
1271 macb_rx_ring_wrap(bp, last_frag), len);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001272
Moritz Fischer64ec42f2016-03-29 19:11:12 -07001273 /* The ethernet header starts NET_IP_ALIGN bytes into the
Havard Skinnemoen29bc2e12012-10-31 06:04:58 +00001274 * first buffer. Since the header is 14 bytes, this makes the
1275 * payload word-aligned.
1276 *
1277 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
1278 * the two padding bytes into the skb so that we avoid hitting
1279 * the slowpath in memcpy(), and pull them off afterwards.
1280 */
1281 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001282 if (!skb) {
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02001283 bp->dev->stats.rx_dropped++;
Havard Skinnemoen55054a12012-10-31 06:04:55 +00001284 for (frag = first_frag; ; frag++) {
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001285 desc = macb_rx_desc(queue, frag);
Havard Skinnemoen55054a12012-10-31 06:04:55 +00001286 desc->addr &= ~MACB_BIT(RX_USED);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001287 if (frag == last_frag)
1288 break;
1289 }
Havard Skinnemoen03dbe052012-10-31 06:04:51 +00001290
1291 /* Make descriptor updates visible to hardware */
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001292 wmb();
Havard Skinnemoen03dbe052012-10-31 06:04:51 +00001293
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001294 return 1;
1295 }
1296
Havard Skinnemoen29bc2e12012-10-31 06:04:58 +00001297 offset = 0;
1298 len += NET_IP_ALIGN;
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001299 skb_checksum_none_assert(skb);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001300 skb_put(skb, len);
1301
Havard Skinnemoen55054a12012-10-31 06:04:55 +00001302 for (frag = first_frag; ; frag++) {
Nicolas Ferre1b447912013-06-04 21:57:11 +00001303 unsigned int frag_len = bp->rx_buffer_size;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001304
1305 if (offset + frag_len > len) {
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001306 if (unlikely(frag != last_frag)) {
1307 dev_kfree_skb_any(skb);
1308 return -1;
1309 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001310 frag_len = len - offset;
1311 }
Arnaldo Carvalho de Melo27d7ff42007-03-31 11:55:19 -03001312 skb_copy_to_linear_data_offset(skb, offset,
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001313 macb_rx_buffer(queue, frag),
Moritz Fischeraa50b552016-03-29 19:11:13 -07001314 frag_len);
Nicolas Ferre1b447912013-06-04 21:57:11 +00001315 offset += bp->rx_buffer_size;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001316 desc = macb_rx_desc(queue, frag);
Havard Skinnemoen55054a12012-10-31 06:04:55 +00001317 desc->addr &= ~MACB_BIT(RX_USED);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001318
1319 if (frag == last_frag)
1320 break;
1321 }
1322
Havard Skinnemoen03dbe052012-10-31 06:04:51 +00001323 /* Make descriptor updates visible to hardware */
1324 wmb();
1325
Havard Skinnemoen29bc2e12012-10-31 06:04:58 +00001326 __skb_pull(skb, NET_IP_ALIGN);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001327 skb->protocol = eth_type_trans(skb, bp->dev);
1328
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02001329 bp->dev->stats.rx_packets++;
1330 bp->dev->stats.rx_bytes += skb->len;
Havard Skinnemoena268adb2012-10-31 06:04:52 +00001331 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
Moritz Fischeraa50b552016-03-29 19:11:13 -07001332 skb->len, skb->csum);
Antoine Tenart97236cd2019-06-21 17:30:02 +02001333 napi_gro_receive(napi, skb);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001334
1335 return 0;
1336}
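/*
 * Editor's sketch (standalone, assumption-laden): the fragment copy
 * loop of macb_rx_frame() reduced to plain C.  A frame of 'len' bytes
 * is spread across fixed-size receive buffers and only the last
 * fragment may be partial; 'buf_size' stands in for bp->rx_buffer_size
 * and the NET_IP_ALIGN shift is omitted for clarity.
 */
#include <stddef.h>
#include <string.h>

static void model_copy_frame(unsigned char *dst, size_t len,
			     unsigned char *const *frags, size_t buf_size)
{
	size_t offset = 0, i = 0;

	while (offset < len) {
		size_t chunk = buf_size;

		if (offset + chunk > len)
			chunk = len - offset;	/* trailing partial buffer */
		memcpy(dst + offset, frags[i++], chunk);
		offset += chunk;
	}
}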
1337
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001338static inline void macb_init_rx_ring(struct macb_queue *queue)
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001339{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001340 struct macb *bp = queue->bp;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001341 dma_addr_t addr;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001342 struct macb_dma_desc *desc = NULL;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001343 int i;
1344
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001345 addr = queue->rx_buffers_dma;
Zach Brownb410d132016-10-19 09:56:57 -05001346 for (i = 0; i < bp->rx_ring_size; i++) {
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001347 desc = macb_rx_desc(queue, i);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001348 macb_set_addr(bp, desc, addr);
1349 desc->ctrl = 0;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001350 addr += bp->rx_buffer_size;
1351 }
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001352 desc->addr |= MACB_BIT(RX_WRAP);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001353 queue->rx_tail = 0;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001354}
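/* Editor's note: ring indices in this driver are free-running and are
 * reduced with a power-of-two mask, which is why the ring sizes must be
 * powers of 2.  A hypothetical stand-in for macb_rx_ring_wrap(): */
static inline unsigned int model_ring_wrap(unsigned int index,
					   unsigned int ring_size)
{
	return index & (ring_size - 1);	/* valid only for power-of-2 sizes */
}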
1355
Antoine Tenart97236cd2019-06-21 17:30:02 +02001356static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
1357 int budget)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001358{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001359 struct macb *bp = queue->bp;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001360 bool reset_rx_queue = false;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001361 int received = 0;
Havard Skinnemoen55054a12012-10-31 06:04:55 +00001362 unsigned int tail;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001363 int first_frag = -1;
1364
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001365 for (tail = queue->rx_tail; budget > 0; tail++) {
1366 struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001367 u32 ctrl;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001368
Havard Skinnemoen03dbe052012-10-31 06:04:51 +00001369 /* Make hw descriptor updates visible to CPU */
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001370 rmb();
Havard Skinnemoen03dbe052012-10-31 06:04:51 +00001371
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001372 if (!(desc->addr & MACB_BIT(RX_USED)))
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001373 break;
1374
Anssi Hannula6e0af292018-12-17 15:05:41 +02001375 /* Ensure ctrl is at least as up-to-date as addr */
1376 dma_rmb();
1377
1378 ctrl = desc->ctrl;
1379
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001380 if (ctrl & MACB_BIT(RX_SOF)) {
1381 if (first_frag != -1)
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001382 discard_partial_frame(queue, first_frag, tail);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001383 first_frag = tail;
1384 }
1385
1386 if (ctrl & MACB_BIT(RX_EOF)) {
1387 int dropped;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001388
1389 if (unlikely(first_frag == -1)) {
1390 reset_rx_queue = true;
1391 continue;
1392 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001393
Antoine Tenart97236cd2019-06-21 17:30:02 +02001394 dropped = macb_rx_frame(queue, napi, first_frag, tail);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001395 first_frag = -1;
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001396 if (unlikely(dropped < 0)) {
1397 reset_rx_queue = true;
1398 continue;
1399 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001400 if (!dropped) {
1401 received++;
1402 budget--;
1403 }
1404 }
1405 }
1406
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001407 if (unlikely(reset_rx_queue)) {
1408 unsigned long flags;
1409 u32 ctrl;
1410
1411		netdev_err(bp->dev, "RX queue corruption: resetting it\n");
1412
1413 spin_lock_irqsave(&bp->lock, flags);
1414
1415 ctrl = macb_readl(bp, NCR);
1416 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1417
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001418 macb_init_rx_ring(queue);
1419 queue_writel(queue, RBQP, queue->rx_ring_dma);
Cyrille Pitchen9ba723b2016-03-25 10:37:34 +01001420
1421 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1422
1423 spin_unlock_irqrestore(&bp->lock, flags);
1424 return received;
1425 }
1426
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001427 if (first_frag != -1)
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001428 queue->rx_tail = first_frag;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001429 else
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001430 queue->rx_tail = tail;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001431
1432 return received;
1433}
1434
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001435static int macb_poll(struct napi_struct *napi, int budget)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001436{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001437 struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
1438 struct macb *bp = queue->bp;
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001439 int work_done;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001440 u32 status;
1441
1442 status = macb_readl(bp, RSR);
1443 macb_writel(bp, RSR, status);
1444
Havard Skinnemoena268adb2012-10-31 06:04:52 +00001445 netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
Moritz Fischeraa50b552016-03-29 19:11:13 -07001446 (unsigned long)status, budget);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001447
Antoine Tenart97236cd2019-06-21 17:30:02 +02001448 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
Joshua Hokeb3363692010-10-25 01:44:22 +00001449 if (work_done < budget) {
Eric Dumazet6ad20162017-01-30 08:22:01 -08001450 napi_complete_done(napi, work_done);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001451
Nicolas Ferre8770e912013-02-12 11:08:48 +01001452 /* Packets received while interrupts were disabled */
1453 status = macb_readl(bp, RSR);
Soren Brinkmann504ad982014-05-04 15:43:01 -07001454 if (status) {
Soren Brinkmann02f7a342014-05-04 15:43:00 -07001455 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001456 queue_writel(queue, ISR, MACB_BIT(RCOMP));
Nicolas Ferre8770e912013-02-12 11:08:48 +01001457 napi_reschedule(napi);
Soren Brinkmann02f7a342014-05-04 15:43:00 -07001458 } else {
Harini Katakame5010702019-01-29 15:20:03 +05301459 queue_writel(queue, IER, bp->rx_intr_mask);
Soren Brinkmann02f7a342014-05-04 15:43:00 -07001460 }
Joshua Hokeb3363692010-10-25 01:44:22 +00001461 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001462
1463 /* TODO: Handle errors */
1464
Stephen Hemmingerbea33482007-10-03 16:41:36 -07001465 return work_done;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001466}
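/*
 * Editor's note (illustrative model, not driver code): macb_poll()
 * re-reads RSR after napi_complete_done() because a frame can arrive
 * after the last descriptor scan but before receive interrupts are
 * re-enabled; without the re-check such a frame could sit unprocessed
 * until the next, unrelated interrupt.  Single-threaded model:
 */
#include <stdbool.h>

static int model_pending_rx;		/* frames the "hardware" queued */
static bool model_rx_irq_enabled;	/* stands in for the IER write */

static int model_process_rx(int budget)
{
	int done = 0;

	while (done < budget && model_pending_rx > 0) {
		model_pending_rx--;
		done++;
	}
	return done;
}

static int model_poll(int budget)
{
	int work = model_process_rx(budget);

	if (work < budget) {
		/* "napi_complete_done()" point: polling is over ... */
		if (model_pending_rx > 0)
			work += model_process_rx(budget - work); /* resched */
		else
			model_rx_irq_enabled = true;	/* back to IRQ mode */
	}
	return work;
}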
1467
Allen Paise7412b82020-09-14 12:59:23 +05301468static void macb_hresp_error_task(struct tasklet_struct *t)
Harini Katakam032dc412018-01-27 12:09:01 +05301469{
Allen Paise7412b82020-09-14 12:59:23 +05301470 struct macb *bp = from_tasklet(bp, t, hresp_err_tasklet);
Harini Katakam032dc412018-01-27 12:09:01 +05301471 struct net_device *dev = bp->dev;
Claudiu Beznea580d3952020-07-02 12:06:00 +03001472 struct macb_queue *queue;
Harini Katakam032dc412018-01-27 12:09:01 +05301473 unsigned int q;
1474 u32 ctrl;
1475
1476 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
Harini Katakame5010702019-01-29 15:20:03 +05301477 queue_writel(queue, IDR, bp->rx_intr_mask |
Harini Katakam032dc412018-01-27 12:09:01 +05301478 MACB_TX_INT_FLAGS |
1479 MACB_BIT(HRESP));
1480 }
1481 ctrl = macb_readl(bp, NCR);
1482 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
1483 macb_writel(bp, NCR, ctrl);
1484
1485 netif_tx_stop_all_queues(dev);
1486 netif_carrier_off(dev);
1487
1488 bp->macbgem_ops.mog_init_rings(bp);
1489
1490 /* Initialize TX and RX buffers */
Antoine Tenart6e952d92019-11-13 10:00:05 +01001491 macb_init_buffers(bp);
Harini Katakam032dc412018-01-27 12:09:01 +05301492
Antoine Tenart6e952d92019-11-13 10:00:05 +01001493 /* Enable interrupts */
1494 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
Harini Katakam032dc412018-01-27 12:09:01 +05301495 queue_writel(queue, IER,
Harini Katakame5010702019-01-29 15:20:03 +05301496 bp->rx_intr_mask |
Harini Katakam032dc412018-01-27 12:09:01 +05301497 MACB_TX_INT_FLAGS |
1498 MACB_BIT(HRESP));
Harini Katakam032dc412018-01-27 12:09:01 +05301499
1500 ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
1501 macb_writel(bp, NCR, ctrl);
1502
1503 netif_carrier_on(dev);
1504 netif_tx_start_all_queues(dev);
1505}
1506
Claudiu Beznea42983882018-12-17 10:02:42 +00001507static void macb_tx_restart(struct macb_queue *queue)
1508{
1509 unsigned int head = queue->tx_head;
1510 unsigned int tail = queue->tx_tail;
1511 struct macb *bp = queue->bp;
1512
1513 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1514 queue_writel(queue, ISR, MACB_BIT(TXUBR));
1515
1516 if (head == tail)
1517 return;
1518
1519 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1520}
1521
Nicolas Ferre9d45c8e2020-07-20 10:56:53 +02001522static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
1523{
1524 struct macb_queue *queue = dev_id;
1525 struct macb *bp = queue->bp;
1526 u32 status;
1527
1528 status = queue_readl(queue, ISR);
1529
1530 if (unlikely(!status))
1531 return IRQ_NONE;
1532
1533 spin_lock(&bp->lock);
1534
1535 if (status & MACB_BIT(WOL)) {
1536 queue_writel(queue, IDR, MACB_BIT(WOL));
1537 macb_writel(bp, WOL, 0);
1538 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
1539 (unsigned int)(queue - bp->queues),
1540 (unsigned long)status);
1541 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1542 queue_writel(queue, ISR, MACB_BIT(WOL));
1543 pm_wakeup_event(&bp->pdev->dev, 0);
1544 }
1545
1546 spin_unlock(&bp->lock);
1547
1548 return IRQ_HANDLED;
1549}
1550
Nicolas Ferre558e35c2020-07-20 10:56:52 +02001551static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
1552{
1553 struct macb_queue *queue = dev_id;
1554 struct macb *bp = queue->bp;
1555 u32 status;
1556
1557 status = queue_readl(queue, ISR);
1558
1559 if (unlikely(!status))
1560 return IRQ_NONE;
1561
1562 spin_lock(&bp->lock);
1563
1564 if (status & GEM_BIT(WOL)) {
1565 queue_writel(queue, IDR, GEM_BIT(WOL));
1566 gem_writel(bp, WOL, 0);
1567 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
1568 (unsigned int)(queue - bp->queues),
1569 (unsigned long)status);
1570 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1571 queue_writel(queue, ISR, GEM_BIT(WOL));
1572 pm_wakeup_event(&bp->pdev->dev, 0);
1573 }
1574
1575 spin_unlock(&bp->lock);
1576
1577 return IRQ_HANDLED;
1578}
1579
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001580static irqreturn_t macb_interrupt(int irq, void *dev_id)
1581{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001582 struct macb_queue *queue = dev_id;
1583 struct macb *bp = queue->bp;
1584 struct net_device *dev = bp->dev;
Nathan Sullivanbfbb92c2015-05-05 15:00:25 -05001585 u32 status, ctrl;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001586
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001587 status = queue_readl(queue, ISR);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001588
1589 if (unlikely(!status))
1590 return IRQ_NONE;
1591
1592 spin_lock(&bp->lock);
1593
1594 while (status) {
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001595 /* close possible race with dev_close */
1596 if (unlikely(!netif_running(dev))) {
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001597 queue_writel(queue, IDR, -1);
Nathan Sullivan24468372016-01-14 13:27:27 -06001598 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
1599 queue_writel(queue, ISR, -1);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001600 break;
1601 }
1602
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001603 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
1604 (unsigned int)(queue - bp->queues),
1605 (unsigned long)status);
Havard Skinnemoena268adb2012-10-31 06:04:52 +00001606
Harini Katakame5010702019-01-29 15:20:03 +05301607 if (status & bp->rx_intr_mask) {
Moritz Fischer64ec42f2016-03-29 19:11:12 -07001608 /* There's no point taking any more interrupts
Joshua Hokeb3363692010-10-25 01:44:22 +00001609 * until we have processed the buffers. The
1610 * scheduling call may fail if the poll routine
1611 * is already scheduled, so disable interrupts
1612 * now.
1613 */
Harini Katakame5010702019-01-29 15:20:03 +05301614 queue_writel(queue, IDR, bp->rx_intr_mask);
Nicolas Ferre581df9e2013-05-14 03:00:16 +00001615 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001616 queue_writel(queue, ISR, MACB_BIT(RCOMP));
Joshua Hokeb3363692010-10-25 01:44:22 +00001617
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001618 if (napi_schedule_prep(&queue->napi)) {
Havard Skinnemoena268adb2012-10-31 06:04:52 +00001619 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00001620 __napi_schedule(&queue->napi);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001621 }
1622 }
1623
Nicolas Ferree86cd532012-10-31 06:04:57 +00001624 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001625 queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
1626 schedule_work(&queue->tx_error_task);
Soren Brinkmann6a027b72014-05-04 15:42:59 -07001627
1628 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001629 queue_writel(queue, ISR, MACB_TX_ERR_FLAGS);
Soren Brinkmann6a027b72014-05-04 15:42:59 -07001630
Nicolas Ferree86cd532012-10-31 06:04:57 +00001631 break;
1632 }
1633
1634 if (status & MACB_BIT(TCOMP))
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001635 macb_tx_interrupt(queue);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001636
Claudiu Beznea42983882018-12-17 10:02:42 +00001637 if (status & MACB_BIT(TXUBR))
1638 macb_tx_restart(queue);
1639
Moritz Fischer64ec42f2016-03-29 19:11:12 -07001640 /* Link change detection isn't possible with RMII, so we'll
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001641 * add that if/when we get our hands on a full-blown MII PHY.
1642 */
1643
Nathan Sullivan86b5e7d2015-05-13 17:01:36 -05001644 /* There is a hardware issue under heavy load where DMA can
1645		 * stop; this causes endless "used buffer descriptor read"
1646		 * interrupts, but it can be cleared by re-enabling RX. See
Harini Katakame5010702019-01-29 15:20:03 +05301647 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
1648 * section 16.7.4 for details. RXUBR is only enabled for
1649 * these two versions.
Nathan Sullivan86b5e7d2015-05-13 17:01:36 -05001650 */
Nathan Sullivanbfbb92c2015-05-05 15:00:25 -05001651 if (status & MACB_BIT(RXUBR)) {
1652 ctrl = macb_readl(bp, NCR);
1653 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
Zumeng Chenffac0e92016-11-28 21:55:00 +08001654 wmb();
Nathan Sullivanbfbb92c2015-05-05 15:00:25 -05001655 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1656
1657 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Cyrille Pitchenba504992016-03-24 15:40:04 +01001658 queue_writel(queue, ISR, MACB_BIT(RXUBR));
Nathan Sullivanbfbb92c2015-05-05 15:00:25 -05001659 }
1660
Alexander Steinb19f7f72011-04-13 05:03:24 +00001661 if (status & MACB_BIT(ISR_ROVR)) {
1662 /* We missed at least one packet */
Jamie Ilesf75ba502011-11-08 10:12:32 +00001663 if (macb_is_gem(bp))
1664 bp->hw_stats.gem.rx_overruns++;
1665 else
1666 bp->hw_stats.macb.rx_overruns++;
Soren Brinkmann6a027b72014-05-04 15:42:59 -07001667
1668 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001669 queue_writel(queue, ISR, MACB_BIT(ISR_ROVR));
Alexander Steinb19f7f72011-04-13 05:03:24 +00001670 }
1671
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001672 if (status & MACB_BIT(HRESP)) {
Harini Katakam032dc412018-01-27 12:09:01 +05301673 tasklet_schedule(&bp->hresp_err_tasklet);
Jamie Ilesc220f8c2011-03-08 20:27:08 +00001674 netdev_err(dev, "DMA bus error: HRESP not OK\n");
Soren Brinkmann6a027b72014-05-04 15:42:59 -07001675
1676 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001677 queue_writel(queue, ISR, MACB_BIT(HRESP));
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001678 }
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001679 status = queue_readl(queue, ISR);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001680 }
1681
1682 spin_unlock(&bp->lock);
1683
1684 return IRQ_HANDLED;
1685}
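/*
 * Editor's sketch: the while (status) shape of macb_interrupt() in
 * miniature.  The status register is re-read at the bottom of the loop
 * because new events can latch while earlier ones are being serviced;
 * the handler only returns once a read observes zero.  Toy register:
 */
static volatile unsigned int model_isr;	/* set asynchronously elsewhere */

static unsigned int model_irq_drain(void)
{
	unsigned int status, handled = 0;

	while ((status = model_isr) != 0) {
		model_isr &= ~status;	/* acknowledge what was seen */
		handled |= status;	/* ...dispatch per-bit work here */
	}
	return handled;
}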
1686
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07001687#ifdef CONFIG_NET_POLL_CONTROLLER
Moritz Fischer64ec42f2016-03-29 19:11:12 -07001688/* Polling receive - used by netconsole and other diagnostic tools
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07001689 * to allow network i/o with interrupts disabled.
1690 */
1691static void macb_poll_controller(struct net_device *dev)
1692{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001693 struct macb *bp = netdev_priv(dev);
1694 struct macb_queue *queue;
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07001695 unsigned long flags;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001696 unsigned int q;
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07001697
1698 local_irq_save(flags);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001699 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
1700 macb_interrupt(dev->irq, queue);
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07001701 local_irq_restore(flags);
1702}
1703#endif
1704
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001705static unsigned int macb_tx_map(struct macb *bp,
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001706 struct macb_queue *queue,
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001707 struct sk_buff *skb,
1708 unsigned int hdrlen)
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001709{
1710 dma_addr_t mapping;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001711 unsigned int len, entry, i, tx_head = queue->tx_head;
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001712 struct macb_tx_skb *tx_skb = NULL;
1713 struct macb_dma_desc *desc;
1714 unsigned int offset, size, count = 0;
1715 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001716 unsigned int eof = 1, mss_mfs = 0;
1717 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
1718
1719 /* LSO */
1720 if (skb_shinfo(skb)->gso_size != 0) {
1721 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
1722 /* UDP - UFO */
1723 lso_ctrl = MACB_LSO_UFO_ENABLE;
1724 else
1725 /* TCP - TSO */
1726 lso_ctrl = MACB_LSO_TSO_ENABLE;
1727 }
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001728
1729 /* First, map non-paged data */
1730 len = skb_headlen(skb);
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001731
1732 /* first buffer length */
1733 size = hdrlen;
1734
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001735 offset = 0;
1736 while (len) {
Zach Brownb410d132016-10-19 09:56:57 -05001737 entry = macb_tx_ring_wrap(bp, tx_head);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001738 tx_skb = &queue->tx_skb[entry];
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001739
1740 mapping = dma_map_single(&bp->pdev->dev,
1741 skb->data + offset,
1742 size, DMA_TO_DEVICE);
1743 if (dma_mapping_error(&bp->pdev->dev, mapping))
1744 goto dma_error;
1745
1746 /* Save info to properly release resources */
1747 tx_skb->skb = NULL;
1748 tx_skb->mapping = mapping;
1749 tx_skb->size = size;
1750 tx_skb->mapped_as_page = false;
1751
1752 len -= size;
1753 offset += size;
1754 count++;
1755 tx_head++;
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001756
1757 size = min(len, bp->max_tx_length);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001758 }
1759
1760 /* Then, map paged data from fragments */
1761 for (f = 0; f < nr_frags; f++) {
1762 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1763
1764 len = skb_frag_size(frag);
1765 offset = 0;
1766 while (len) {
1767 size = min(len, bp->max_tx_length);
Zach Brownb410d132016-10-19 09:56:57 -05001768 entry = macb_tx_ring_wrap(bp, tx_head);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001769 tx_skb = &queue->tx_skb[entry];
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001770
1771 mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
1772 offset, size, DMA_TO_DEVICE);
1773 if (dma_mapping_error(&bp->pdev->dev, mapping))
1774 goto dma_error;
1775
1776 /* Save info to properly release resources */
1777 tx_skb->skb = NULL;
1778 tx_skb->mapping = mapping;
1779 tx_skb->size = size;
1780 tx_skb->mapped_as_page = true;
1781
1782 len -= size;
1783 offset += size;
1784 count++;
1785 tx_head++;
1786 }
1787 }
1788
1789 /* Should never happen */
Moritz Fischeraa50b552016-03-29 19:11:13 -07001790 if (unlikely(!tx_skb)) {
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001791 netdev_err(bp->dev, "BUG! empty skb!\n");
1792 return 0;
1793 }
1794
1795 /* This is the last buffer of the frame: save socket buffer */
1796 tx_skb->skb = skb;
1797
1798 /* Update TX ring: update buffer descriptors in reverse order
1799	 * to avoid a race condition
1800 */
1801
1802 /* Set 'TX_USED' bit in buffer descriptor at tx_head position
1803 * to set the end of TX queue
1804 */
1805 i = tx_head;
Zach Brownb410d132016-10-19 09:56:57 -05001806 entry = macb_tx_ring_wrap(bp, i);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001807 ctrl = MACB_BIT(TX_USED);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001808 desc = macb_tx_desc(queue, entry);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001809 desc->ctrl = ctrl;
1810
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001811 if (lso_ctrl) {
1812 if (lso_ctrl == MACB_LSO_UFO_ENABLE)
1813 /* include header and FCS in value given to h/w */
1814 mss_mfs = skb_shinfo(skb)->gso_size +
1815 skb_transport_offset(skb) +
1816 ETH_FCS_LEN;
1817 else /* TSO */ {
1818 mss_mfs = skb_shinfo(skb)->gso_size;
1819 /* TCP Sequence Number Source Select
1820 * can be set only for TSO
1821 */
1822 seq_ctrl = 0;
1823 }
1824 }
1825
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001826 do {
1827 i--;
Zach Brownb410d132016-10-19 09:56:57 -05001828 entry = macb_tx_ring_wrap(bp, i);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001829 tx_skb = &queue->tx_skb[entry];
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001830 desc = macb_tx_desc(queue, entry);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001831
1832 ctrl = (u32)tx_skb->size;
1833 if (eof) {
1834 ctrl |= MACB_BIT(TX_LAST);
1835 eof = 0;
1836 }
Zach Brownb410d132016-10-19 09:56:57 -05001837 if (unlikely(entry == (bp->tx_ring_size - 1)))
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001838 ctrl |= MACB_BIT(TX_WRAP);
1839
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001840 /* First descriptor is header descriptor */
1841 if (i == queue->tx_head) {
1842 ctrl |= MACB_BF(TX_LSO, lso_ctrl);
1843 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
Claudiu Beznea653e92a2018-08-07 12:25:14 +03001844 if ((bp->dev->features & NETIF_F_HW_CSUM) &&
1845 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl)
1846 ctrl |= MACB_BIT(TX_NOCRC);
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001847 } else
1848 /* Only set MSS/MFS on payload descriptors
1849 * (second or later descriptor)
1850 */
1851 ctrl |= MACB_BF(MSS_MFS, mss_mfs);
1852
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001853 /* Set TX buffer descriptor */
Rafal Ozieblodc97a892017-01-27 15:08:20 +00001854 macb_set_addr(bp, desc, tx_skb->mapping);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001855 /* desc->addr must be visible to hardware before clearing
1856 * 'TX_USED' bit in desc->ctrl.
1857 */
1858 wmb();
1859 desc->ctrl = ctrl;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001860 } while (i != queue->tx_head);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001861
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001862 queue->tx_head = tx_head;
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001863
1864 return count;
1865
1866dma_error:
1867 netdev_err(bp->dev, "TX DMA map failed\n");
1868
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001869 for (i = queue->tx_head; i != tx_head; i++) {
1870 tx_skb = macb_tx_skb(queue, i);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02001871
1872 macb_tx_unmap(bp, tx_skb);
1873 }
1874
1875 return 0;
1876}
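/*
 * Editor's note (userspace model, illustrative layout): the do/while
 * above fills descriptors *backwards* so the controller never sees a
 * half-built chain.  The slot after the frame is marked used first as
 * a stopper, and the first descriptor's TX_USED bit is cleared last,
 * each ctrl store ordered after its addr store (the wmb() above).
 */
#include <stdatomic.h>
#include <stdint.h>

#define MODEL_TX_USED	(1u << 31)	/* models MACB_BIT(TX_USED) */

struct model_tx_desc {
	uint64_t addr;
	_Atomic uint32_t ctrl;
};

static void model_publish_frame(struct model_tx_desc *ring, unsigned int mask,
				unsigned int first, unsigned int count)
{
	unsigned int i = first + count;

	/* stopper: hardware must halt after the last real descriptor */
	atomic_store_explicit(&ring[i & mask].ctrl, MODEL_TX_USED,
			      memory_order_relaxed);

	while (i-- > first) {		/* walk back down to 'first' */
		struct model_tx_desc *d = &ring[i & mask];

		d->addr = 0x1000u + i;	/* dummy DMA address */
		/* release: addr is visible before TX_USED clears */
		atomic_store_explicit(&d->ctrl, 0, memory_order_release);
	}
}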
1877
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001878static netdev_features_t macb_features_check(struct sk_buff *skb,
1879 struct net_device *dev,
1880 netdev_features_t features)
1881{
1882 unsigned int nr_frags, f;
1883 unsigned int hdrlen;
1884
1885 /* Validate LSO compatibility */
1886
Harini Katakam41c1ef92020-02-05 18:08:11 +05301887 /* there is only one buffer or protocol is not UDP */
1888 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001889 return features;
1890
1891 /* length of header */
1892 hdrlen = skb_transport_offset(skb);
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001893
Harini Katakam41c1ef92020-02-05 18:08:11 +05301894 /* For UFO only:
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001895 * When software supplies two or more payload buffers all payload buffers
1896 * apart from the last must be a multiple of 8 bytes in size.
1897 */
1898 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
1899 return features & ~MACB_NETIF_LSO;
1900
1901 nr_frags = skb_shinfo(skb)->nr_frags;
1902 /* No need to check last fragment */
1903 nr_frags--;
1904 for (f = 0; f < nr_frags; f++) {
1905 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
1906
1907 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
1908 return features & ~MACB_NETIF_LSO;
1909 }
1910 return features;
1911}
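/* Editor's sketch: the IS_ALIGNED() test used above, written out for a
 * power-of-two alignment such as MACB_TX_LEN_ALIGN.  Every UFO payload
 * buffer except the last must pass this check, or LSO is dropped from
 * the advertised features for that skb: */
static inline int model_is_aligned(unsigned long len, unsigned long align)
{
	return (len & (align - 1)) == 0;	/* align: power of 2 */
}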
1912
Helmut Buchsbaum007e4ba2016-09-04 18:09:47 +02001913static inline int macb_clear_csum(struct sk_buff *skb)
1914{
1915 /* no change for packets without checksum offloading */
1916 if (skb->ip_summed != CHECKSUM_PARTIAL)
1917 return 0;
1918
1919 /* make sure we can modify the header */
1920 if (unlikely(skb_cow_head(skb, 0)))
1921 return -1;
1922
1923 /* initialize checksum field
1924 * This is required - at least for Zynq, which otherwise calculates
1925 * wrong UDP header checksums for UDP packets with UDP data len <=2
1926 */
1927 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
1928 return 0;
1929}
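/* Editor's sketch (standalone): what the assignment above does, in
 * plain pointer math.  For a CHECKSUM_PARTIAL skb the stack records
 * where checksumming starts and where the 16-bit result lives relative
 * to that point; zeroing that field lets the controller compute the
 * checksum from a known state: */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static void model_clear_csum(uint8_t *pkt, size_t csum_start,
			     size_t csum_offset)
{
	const uint16_t zero = 0;

	memcpy(pkt + csum_start + csum_offset, &zero, sizeof(zero));
}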
1930
Claudiu Beznea653e92a2018-08-07 12:25:14 +03001931static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
1932{
1933 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb);
1934 int padlen = ETH_ZLEN - (*skb)->len;
1935 int headroom = skb_headroom(*skb);
1936 int tailroom = skb_tailroom(*skb);
1937 struct sk_buff *nskb;
1938 u32 fcs;
1939
1940 if (!(ndev->features & NETIF_F_HW_CSUM) ||
1941 !((*skb)->ip_summed != CHECKSUM_PARTIAL) ||
1942 skb_shinfo(*skb)->gso_size) /* Not available for GSO */
1943 return 0;
1944
1945 if (padlen <= 0) {
1946 /* FCS could be appeded to tailroom. */
1947 if (tailroom >= ETH_FCS_LEN)
1948 goto add_fcs;
1949 /* FCS could be appeded by moving data to headroom. */
1950 else if (!cloned && headroom + tailroom >= ETH_FCS_LEN)
1951 padlen = 0;
1952 /* No room for FCS, need to reallocate skb. */
1953 else
Tristram Ha899ecae2018-10-24 14:51:23 -07001954 padlen = ETH_FCS_LEN;
Claudiu Beznea653e92a2018-08-07 12:25:14 +03001955 } else {
1956 /* Add room for FCS. */
1957 padlen += ETH_FCS_LEN;
1958 }
1959
1960 if (!cloned && headroom + tailroom >= padlen) {
1961 (*skb)->data = memmove((*skb)->head, (*skb)->data, (*skb)->len);
1962 skb_set_tail_pointer(*skb, (*skb)->len);
1963 } else {
1964 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
1965 if (!nskb)
1966 return -ENOMEM;
1967
Huang Zijiangf3e5c072019-02-14 14:41:18 +08001968 dev_consume_skb_any(*skb);
Claudiu Beznea653e92a2018-08-07 12:25:14 +03001969 *skb = nskb;
1970 }
1971
Claudiu Bezneaba3e1842019-01-03 14:59:35 +00001972 if (padlen > ETH_FCS_LEN)
1973 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
Claudiu Beznea653e92a2018-08-07 12:25:14 +03001974
1975add_fcs:
1976 /* set FCS to packet */
1977 fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
1978 fcs = ~fcs;
1979
1980 skb_put_u8(*skb, fcs & 0xff);
1981 skb_put_u8(*skb, (fcs >> 8) & 0xff);
1982 skb_put_u8(*skb, (fcs >> 16) & 0xff);
1983 skb_put_u8(*skb, (fcs >> 24) & 0xff);
1984
1985 return 0;
1986}
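/*
 * Editor's sketch (standalone, not driver code): the FCS appended
 * above is the standard Ethernet CRC-32.  crc32_le(~0, data, len)
 * followed by inversion is equivalent to the classic reflected CRC
 * below, and the result goes on the wire least-significant byte
 * first -- hence the four byte-wise skb_put_u8() calls.
 */
#include <stddef.h>
#include <stdint.h>

static uint32_t model_ether_crc32(const uint8_t *data, size_t len)
{
	uint32_t crc = 0xFFFFFFFFu;

	while (len--) {
		crc ^= *data++;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ (0xEDB88320u & -(crc & 1u));
	}
	return ~crc;
}

static size_t model_append_fcs(uint8_t *frame, size_t len)
{
	uint32_t fcs = model_ether_crc32(frame, len);

	for (int i = 0; i < 4; i++)	/* LSB first, as above */
		frame[len + i] = (uint8_t)(fcs >> (8 * i));
	return len + 4;
}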
1987
Claudiu Beznead1c38952018-08-07 12:25:12 +03001988static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001989{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001990 u16 queue_index = skb_get_queue_mapping(skb);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01001991 struct macb *bp = netdev_priv(dev);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01001992 struct macb_queue *queue = &bp->queues[queue_index];
Dongdong Deng48719532009-08-23 19:49:07 -07001993 unsigned long flags;
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001994 unsigned int desc_cnt, nr_frags, frag_size, f;
1995 unsigned int hdrlen;
Claudiu Beznea8932b5a2020-07-02 12:06:01 +03001996 bool is_lso;
Claudiu Beznead1c38952018-08-07 12:25:12 +03001997 netdev_tx_t ret = NETDEV_TX_OK;
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00001998
Claudiu Beznea33729f22018-08-07 12:25:13 +03001999 if (macb_clear_csum(skb)) {
2000 dev_kfree_skb_any(skb);
2001 return ret;
2002 }
2003
Claudiu Beznea653e92a2018-08-07 12:25:14 +03002004 if (macb_pad_and_fcs(&skb, dev)) {
2005 dev_kfree_skb_any(skb);
2006 return ret;
2007 }
2008
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00002009 is_lso = (skb_shinfo(skb)->gso_size != 0);
2010
2011 if (is_lso) {
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00002012 /* length of headers */
Claudiu Beznea8932b5a2020-07-02 12:06:01 +03002013 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00002014 /* only queue eth + ip headers separately for UDP */
2015 hdrlen = skb_transport_offset(skb);
2016 else
2017 hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb);
2018 if (skb_headlen(skb) < hdrlen) {
2019			netdev_err(bp->dev, "Error - LSO headers fragmented!\n");
2020 /* if this is required, would need to copy to single buffer */
2021 return NETDEV_TX_BUSY;
2022 }
2023 } else
2024 hdrlen = min(skb_headlen(skb), bp->max_tx_length);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002025
Havard Skinnemoena268adb2012-10-31 06:04:52 +00002026#if defined(DEBUG) && defined(VERBOSE_DEBUG)
2027 netdev_vdbg(bp->dev,
Moritz Fischeraa50b552016-03-29 19:11:13 -07002028 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
2029 queue_index, skb->len, skb->head, skb->data,
2030 skb_tail_pointer(skb), skb_end_pointer(skb));
Jamie Ilesc220f8c2011-03-08 20:27:08 +00002031 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
2032 skb->data, 16, true);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002033#endif
2034
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02002035 /* Count how many TX buffer descriptors are needed to send this
2036 * socket buffer: skb fragments of jumbo frames may need to be
Moritz Fischeraa50b552016-03-29 19:11:13 -07002037 * split into many buffer descriptors.
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02002038 */
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00002039 if (is_lso && (skb_headlen(skb) > hdrlen))
2040 /* extra header descriptor if also payload in first buffer */
2041 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
2042 else
2043 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02002044 nr_frags = skb_shinfo(skb)->nr_frags;
2045 for (f = 0; f < nr_frags; f++) {
2046 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00002047 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02002048 }
2049
Dongdong Deng48719532009-08-23 19:49:07 -07002050 spin_lock_irqsave(&bp->lock, flags);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002051
2052	/* This is a hard error; log it. */
Zach Brownb410d132016-10-19 09:56:57 -05002053 if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00002054 bp->tx_ring_size) < desc_cnt) {
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002055 netif_stop_subqueue(dev, queue_index);
Dongdong Deng48719532009-08-23 19:49:07 -07002056 spin_unlock_irqrestore(&bp->lock, flags);
Jamie Ilesc220f8c2011-03-08 20:27:08 +00002057 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002058 queue->tx_head, queue->tx_tail);
Patrick McHardy5b548142009-06-12 06:22:29 +00002059 return NETDEV_TX_BUSY;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002060 }
2061
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02002062 /* Map socket buffer for DMA transfer */
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00002063 if (!macb_tx_map(bp, queue, skb, hdrlen)) {
Eric W. Biedermanc88b5b62014-03-15 16:08:27 -07002064 dev_kfree_skb_any(skb);
Soren Brinkmann92030902014-03-04 08:46:39 -08002065 goto unlock;
2066 }
Havard Skinnemoen55054a12012-10-31 06:04:55 +00002067
Havard Skinnemoen03dbe052012-10-31 06:04:51 +00002068 /* Make newly initialized descriptor visible to hardware */
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002069 wmb();
Richard Cochrane0720922011-06-19 21:51:28 +00002070 skb_tx_timestamp(skb);
2071
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002072 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
2073
Zach Brownb410d132016-10-19 09:56:57 -05002074 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002075 netif_stop_subqueue(dev, queue_index);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002076
Soren Brinkmann92030902014-03-04 08:46:39 -08002077unlock:
Dongdong Deng48719532009-08-23 19:49:07 -07002078 spin_unlock_irqrestore(&bp->lock, flags);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002079
Claudiu Beznead1c38952018-08-07 12:25:12 +03002080 return ret;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002081}
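/* Editor's note: the occupancy test above.  For a power-of-two ring
 * with free-running head/tail counters, CIRC_SPACE() from
 * <linux/circ_buf.h> reduces to the expression below; transmission is
 * paused whenever fewer free slots remain than desc_cnt requires: */
static inline unsigned int model_circ_space(unsigned int head,
					    unsigned int tail,
					    unsigned int size)
{
	return (tail - head - 1) & (size - 1);
}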
2082
Nicolas Ferre4df95132013-06-04 21:57:12 +00002083static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
Nicolas Ferre1b447912013-06-04 21:57:11 +00002084{
2085 if (!macb_is_gem(bp)) {
2086 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
2087 } else {
Nicolas Ferre4df95132013-06-04 21:57:12 +00002088 bp->rx_buffer_size = size;
Nicolas Ferre1b447912013-06-04 21:57:11 +00002089
Nicolas Ferre1b447912013-06-04 21:57:11 +00002090 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
Nicolas Ferre4df95132013-06-04 21:57:12 +00002091 netdev_dbg(bp->dev,
Moritz Fischeraa50b552016-03-29 19:11:13 -07002092 "RX buffer must be multiple of %d bytes, expanding\n",
2093 RX_BUFFER_MULTIPLE);
Nicolas Ferre1b447912013-06-04 21:57:11 +00002094 bp->rx_buffer_size =
Nicolas Ferre4df95132013-06-04 21:57:12 +00002095 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
Nicolas Ferre1b447912013-06-04 21:57:11 +00002096 }
Nicolas Ferre1b447912013-06-04 21:57:11 +00002097 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00002098
Alexey Dobriyan5b5e0922017-02-27 14:30:02 -08002099 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
Nicolas Ferre4df95132013-06-04 21:57:12 +00002100 bp->dev->mtu, bp->rx_buffer_size);
Nicolas Ferre1b447912013-06-04 21:57:11 +00002101}
2102
Nicolas Ferre4df95132013-06-04 21:57:12 +00002103static void gem_free_rx_buffers(struct macb *bp)
2104{
2105 struct sk_buff *skb;
2106 struct macb_dma_desc *desc;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002107 struct macb_queue *queue;
Nicolas Ferre4df95132013-06-04 21:57:12 +00002108 dma_addr_t addr;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002109 unsigned int q;
Nicolas Ferre4df95132013-06-04 21:57:12 +00002110 int i;
2111
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002112 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2113 if (!queue->rx_skbuff)
Nicolas Ferre4df95132013-06-04 21:57:12 +00002114 continue;
2115
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002116 for (i = 0; i < bp->rx_ring_size; i++) {
2117 skb = queue->rx_skbuff[i];
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002118
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002119 if (!skb)
2120 continue;
2121
2122 desc = macb_rx_desc(queue, i);
2123 addr = macb_get_addr(bp, desc);
2124
2125 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
2126 DMA_FROM_DEVICE);
2127 dev_kfree_skb_any(skb);
2128 skb = NULL;
2129 }
2130
2131 kfree(queue->rx_skbuff);
2132 queue->rx_skbuff = NULL;
Nicolas Ferre4df95132013-06-04 21:57:12 +00002133 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00002134}
2135
2136static void macb_free_rx_buffers(struct macb *bp)
2137{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002138 struct macb_queue *queue = &bp->queues[0];
2139
2140 if (queue->rx_buffers) {
Nicolas Ferre4df95132013-06-04 21:57:12 +00002141 dma_free_coherent(&bp->pdev->dev,
Zach Brownb410d132016-10-19 09:56:57 -05002142 bp->rx_ring_size * bp->rx_buffer_size,
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002143 queue->rx_buffers, queue->rx_buffers_dma);
2144 queue->rx_buffers = NULL;
Nicolas Ferre4df95132013-06-04 21:57:12 +00002145 }
2146}
Nicolas Ferre1b447912013-06-04 21:57:11 +00002147
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002148static void macb_free_consistent(struct macb *bp)
2149{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002150 struct macb_queue *queue;
2151 unsigned int q;
Harini Katakam404cd082018-07-06 12:18:58 +05302152 int size;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002153
Nicolas Ferre4df95132013-06-04 21:57:12 +00002154 bp->macbgem_ops.mog_free_rx_buffers(bp);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002155
2156 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2157 kfree(queue->tx_skb);
2158 queue->tx_skb = NULL;
2159 if (queue->tx_ring) {
Harini Katakam404cd082018-07-06 12:18:58 +05302160 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
2161 dma_free_coherent(&bp->pdev->dev, size,
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002162 queue->tx_ring, queue->tx_ring_dma);
2163 queue->tx_ring = NULL;
2164 }
Harini Katakame50b7702018-07-06 12:18:57 +05302165 if (queue->rx_ring) {
Harini Katakam404cd082018-07-06 12:18:58 +05302166 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
2167 dma_free_coherent(&bp->pdev->dev, size,
Harini Katakame50b7702018-07-06 12:18:57 +05302168 queue->rx_ring, queue->rx_ring_dma);
2169 queue->rx_ring = NULL;
2170 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002171 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00002172}
2173
2174static int gem_alloc_rx_buffers(struct macb *bp)
2175{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002176 struct macb_queue *queue;
2177 unsigned int q;
Nicolas Ferre4df95132013-06-04 21:57:12 +00002178 int size;
2179
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002180 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2181 size = bp->rx_ring_size * sizeof(struct sk_buff *);
2182 queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
2183 if (!queue->rx_skbuff)
2184 return -ENOMEM;
2185 else
2186 netdev_dbg(bp->dev,
2187 "Allocated %d RX struct sk_buff entries at %p\n",
2188 bp->rx_ring_size, queue->rx_skbuff);
2189 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00002190 return 0;
2191}
2192
2193static int macb_alloc_rx_buffers(struct macb *bp)
2194{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002195 struct macb_queue *queue = &bp->queues[0];
Nicolas Ferre4df95132013-06-04 21:57:12 +00002196 int size;
2197
Zach Brownb410d132016-10-19 09:56:57 -05002198 size = bp->rx_ring_size * bp->rx_buffer_size;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002199 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2200 &queue->rx_buffers_dma, GFP_KERNEL);
2201 if (!queue->rx_buffers)
Nicolas Ferre4df95132013-06-04 21:57:12 +00002202 return -ENOMEM;
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002203
2204 netdev_dbg(bp->dev,
2205 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002206 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
Nicolas Ferre4df95132013-06-04 21:57:12 +00002207 return 0;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002208}
2209
2210static int macb_alloc_consistent(struct macb *bp)
2211{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002212 struct macb_queue *queue;
2213 unsigned int q;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002214 int size;
2215
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002216 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
Harini Katakam404cd082018-07-06 12:18:58 +05302217 size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002218 queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2219 &queue->tx_ring_dma,
2220 GFP_KERNEL);
2221 if (!queue->tx_ring)
2222 goto out_err;
2223 netdev_dbg(bp->dev,
2224 "Allocated TX ring for queue %u of %d bytes at %08lx (mapped %p)\n",
2225 q, size, (unsigned long)queue->tx_ring_dma,
2226 queue->tx_ring);
2227
Zach Brownb410d132016-10-19 09:56:57 -05002228 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002229 queue->tx_skb = kmalloc(size, GFP_KERNEL);
2230 if (!queue->tx_skb)
2231 goto out_err;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002232
Harini Katakam404cd082018-07-06 12:18:58 +05302233 size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002234 queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
2235 &queue->rx_ring_dma, GFP_KERNEL);
2236 if (!queue->rx_ring)
2237 goto out_err;
2238 netdev_dbg(bp->dev,
2239 "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
2240 size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002241 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00002242 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002243 goto out_err;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002244
2245 return 0;
2246
2247out_err:
2248 macb_free_consistent(bp);
2249 return -ENOMEM;
2250}
2251
Nicolas Ferre4df95132013-06-04 21:57:12 +00002252static void gem_init_rings(struct macb *bp)
2253{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002254 struct macb_queue *queue;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002255 struct macb_dma_desc *desc = NULL;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002256 unsigned int q;
Nicolas Ferre4df95132013-06-04 21:57:12 +00002257 int i;
2258
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002259 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
Zach Brownb410d132016-10-19 09:56:57 -05002260 for (i = 0; i < bp->tx_ring_size; i++) {
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002261 desc = macb_tx_desc(queue, i);
2262 macb_set_addr(bp, desc, 0);
2263 desc->ctrl = MACB_BIT(TX_USED);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002264 }
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002265 desc->ctrl |= MACB_BIT(TX_WRAP);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002266 queue->tx_head = 0;
2267 queue->tx_tail = 0;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002268
2269 queue->rx_tail = 0;
2270 queue->rx_prepared_head = 0;
2271
2272 gem_rx_refill(queue);
Nicolas Ferre4df95132013-06-04 21:57:12 +00002273 }
Nicolas Ferre4df95132013-06-04 21:57:12 +00002274
Nicolas Ferre4df95132013-06-04 21:57:12 +00002275}
2276
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002277static void macb_init_rings(struct macb *bp)
2278{
2279 int i;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002280 struct macb_dma_desc *desc = NULL;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002281
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002282 macb_init_rx_ring(&bp->queues[0]);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002283
Zach Brownb410d132016-10-19 09:56:57 -05002284 for (i = 0; i < bp->tx_ring_size; i++) {
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002285 desc = macb_tx_desc(&bp->queues[0], i);
2286 macb_set_addr(bp, desc, 0);
2287 desc->ctrl = MACB_BIT(TX_USED);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002288 }
Ben Shelton21d35152015-04-22 17:28:54 -05002289 bp->queues[0].tx_head = 0;
2290 bp->queues[0].tx_tail = 0;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002291 desc->ctrl |= MACB_BIT(TX_WRAP);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002292}
2293
2294static void macb_reset_hw(struct macb *bp)
2295{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002296 struct macb_queue *queue;
2297 unsigned int q;
Anssi Hannula0da70f82018-08-23 10:45:22 +03002298 u32 ctrl = macb_readl(bp, NCR);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002299
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002300 /* Disable RX and TX (XXX: Should we halt the transmission
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002301 * more gracefully?)
2302 */
Anssi Hannula0da70f82018-08-23 10:45:22 +03002303 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002304
2305 /* Clear the stats registers (XXX: Update stats first?) */
Anssi Hannula0da70f82018-08-23 10:45:22 +03002306 ctrl |= MACB_BIT(CLRSTAT);
2307
2308 macb_writel(bp, NCR, ctrl);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002309
2310 /* Clear all status flags */
Joachim Eastwood95ebcea2012-10-22 08:45:31 +00002311 macb_writel(bp, TSR, -1);
2312 macb_writel(bp, RSR, -1);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002313
2314 /* Disable all interrupts */
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002315 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2316 queue_writel(queue, IDR, -1);
2317 queue_readl(queue, ISR);
Nathan Sullivan24468372016-01-14 13:27:27 -06002318 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
2319 queue_writel(queue, ISR, -1);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002320 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002321}
2322
Jamie Iles70c9f3d2011-03-09 16:22:54 +00002323static u32 gem_mdc_clk_div(struct macb *bp)
2324{
2325 u32 config;
2326 unsigned long pclk_hz = clk_get_rate(bp->pclk);
2327
2328 if (pclk_hz <= 20000000)
2329 config = GEM_BF(CLK, GEM_CLK_DIV8);
2330 else if (pclk_hz <= 40000000)
2331 config = GEM_BF(CLK, GEM_CLK_DIV16);
2332 else if (pclk_hz <= 80000000)
2333 config = GEM_BF(CLK, GEM_CLK_DIV32);
2334 else if (pclk_hz <= 120000000)
2335 config = GEM_BF(CLK, GEM_CLK_DIV48);
2336 else if (pclk_hz <= 160000000)
2337 config = GEM_BF(CLK, GEM_CLK_DIV64);
2338 else
2339 config = GEM_BF(CLK, GEM_CLK_DIV96);
2340
2341 return config;
2342}
2343
2344static u32 macb_mdc_clk_div(struct macb *bp)
2345{
2346 u32 config;
2347 unsigned long pclk_hz;
2348
2349 if (macb_is_gem(bp))
2350 return gem_mdc_clk_div(bp);
2351
2352 pclk_hz = clk_get_rate(bp->pclk);
2353 if (pclk_hz <= 20000000)
2354 config = MACB_BF(CLK, MACB_CLK_DIV8);
2355 else if (pclk_hz <= 40000000)
2356 config = MACB_BF(CLK, MACB_CLK_DIV16);
2357 else if (pclk_hz <= 80000000)
2358 config = MACB_BF(CLK, MACB_CLK_DIV32);
2359 else
2360 config = MACB_BF(CLK, MACB_CLK_DIV64);
2361
2362 return config;
2363}
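/* Editor's note: both divider ladders above pick the smallest divisor
 * that keeps the MDIO clock at or below the nominal 2.5 MHz ceiling of
 * IEEE 802.3 clause 22 (20 MHz/8, 40 MHz/16, ..., 160 MHz/64 all land
 * exactly on 2.5 MHz); GEM adds the /48 and /96 steps that plain MACB
 * lacks.  A table-driven form of the GEM variant, illustration only: */
static unsigned int model_mdc_divisor(unsigned long pclk_hz)
{
	static const unsigned int divs[] = { 8, 16, 32, 48, 64, 96 };

	for (unsigned int i = 0; i < sizeof(divs) / sizeof(divs[0]); i++)
		if (pclk_hz <= 2500000UL * divs[i])
			return divs[i];
	return 96;	/* largest divisor offered; MDC may exceed spec */
}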
2364
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002365/* Get the DMA bus width field of the network configuration register that we
Jamie Iles757a03c2011-03-09 16:29:59 +00002366 * should program. We derive it by decoding the design configuration
2367 * register, which reports the maximum supported data bus width.
2368 */
2369static u32 macb_dbw(struct macb *bp)
2370{
2371 if (!macb_is_gem(bp))
2372 return 0;
2373
2374 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2375 case 4:
2376 return GEM_BF(DBW, GEM_DBW128);
2377 case 2:
2378 return GEM_BF(DBW, GEM_DBW64);
2379 case 1:
2380 default:
2381 return GEM_BF(DBW, GEM_DBW32);
2382 }
2383}
2384
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002385/* Configure the receive DMA engine
Nicolas Ferreb3e3bd712012-11-23 03:49:01 +00002386 * - use the correct receive buffer size
Nicolas Ferree1755872014-07-24 13:50:58 +02002387 * - set best burst length for DMA operations
Nicolas Ferreb3e3bd712012-11-23 03:49:01 +00002388 * (if not supported by FIFO, it will fall back to the default)
2389 * - set both rx/tx packet buffers to full memory size
2390 * These are configurable parameters for GEM.
Jamie Iles0116da42011-03-14 17:38:30 +00002391 */
2392static void macb_configure_dma(struct macb *bp)
2393{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002394 struct macb_queue *queue;
2395 u32 buffer_size;
2396 unsigned int q;
Jamie Iles0116da42011-03-14 17:38:30 +00002397 u32 dmacfg;
2398
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002399 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
Jamie Iles0116da42011-03-14 17:38:30 +00002400 if (macb_is_gem(bp)) {
2401 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002402 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2403 if (q)
2404 queue_writel(queue, RBQS, buffer_size);
2405 else
2406 dmacfg |= GEM_BF(RXBS, buffer_size);
2407 }
Nicolas Ferree1755872014-07-24 13:50:58 +02002408 if (bp->dma_burst_length)
2409 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
Nicolas Ferreb3e3bd712012-11-23 03:49:01 +00002410 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
Arun Chandrana50dad32015-02-18 16:59:35 +05302411 dmacfg &= ~GEM_BIT(ENDIA_PKT);
Arun Chandran62f69242015-03-01 11:38:02 +05302412
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03002413 if (bp->native_io)
Arun Chandran62f69242015-03-01 11:38:02 +05302414 dmacfg &= ~GEM_BIT(ENDIA_DESC);
2415 else
2416 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
2417
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02002418 if (bp->dev->features & NETIF_F_HW_CSUM)
2419 dmacfg |= GEM_BIT(TXCOEN);
2420 else
2421 dmacfg &= ~GEM_BIT(TXCOEN);
Harini Katakamfff80192016-08-09 13:15:53 +05302422
Michal Simekbd620722018-09-25 08:32:50 +02002423 dmacfg &= ~GEM_BIT(ADDR64);
Harini Katakamfff80192016-08-09 13:15:53 +05302424#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
Rafal Ozieblo7b429612017-06-29 07:12:51 +01002425 if (bp->hw_dma_cap & HW_DMA_CAP_64B)
Rafal Ozieblodc97a892017-01-27 15:08:20 +00002426 dmacfg |= GEM_BIT(ADDR64);
Harini Katakamfff80192016-08-09 13:15:53 +05302427#endif
Rafal Ozieblo7b429612017-06-29 07:12:51 +01002428#ifdef CONFIG_MACB_USE_HWSTAMP
2429 if (bp->hw_dma_cap & HW_DMA_CAP_PTP)
2430 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
2431#endif
Nicolas Ferree1755872014-07-24 13:50:58 +02002432 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2433 dmacfg);
Jamie Iles0116da42011-03-14 17:38:30 +00002434 gem_writel(bp, DMACFG, dmacfg);
2435 }
2436}
2437
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002438static void macb_init_hw(struct macb *bp)
2439{
2440 u32 config;
2441
2442 macb_reset_hw(bp);
Joachim Eastwood314bccc2012-11-07 08:14:52 +00002443 macb_set_hwaddr(bp);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002444
Jamie Iles70c9f3d2011-03-09 16:22:54 +00002445 config = macb_mdc_clk_div(bp);
Havard Skinnemoen29bc2e12012-10-31 06:04:58 +00002446 config |= MACB_BF(RBOF, NET_IP_ALIGN); /* Make eth data aligned */
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002447 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
Dan Carpentera104a6b2015-05-12 21:15:24 +03002448 if (bp->caps & MACB_CAPS_JUMBO)
Harini Katakam98b5a0f42015-05-06 22:27:17 +05302449 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
2450 else
2451 config |= MACB_BIT(BIG); /* Receive oversized frames */
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002452 if (bp->dev->flags & IFF_PROMISC)
2453 config |= MACB_BIT(CAF); /* Copy All Frames */
Cyrille Pitchen924ec532014-07-24 13:51:01 +02002454 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
2455 config |= GEM_BIT(RXCOEN);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002456 if (!(bp->dev->flags & IFF_BROADCAST))
2457 config |= MACB_BIT(NBC); /* No BroadCast */
Jamie Iles757a03c2011-03-09 16:29:59 +00002458 config |= macb_dbw(bp);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002459 macb_writel(bp, NCFGR, config);
Dan Carpentera104a6b2015-05-12 21:15:24 +03002460 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
Harini Katakam98b5a0f42015-05-06 22:27:17 +05302461 gem_writel(bp, JML, bp->jumbo_max_len);
Harini Katakam98b5a0f42015-05-06 22:27:17 +05302462 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
Dan Carpentera104a6b2015-05-12 21:15:24 +03002463 if (bp->caps & MACB_CAPS_JUMBO)
Harini Katakam98b5a0f42015-05-06 22:27:17 +05302464 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002465
Jamie Iles0116da42011-03-14 17:38:30 +00002466 macb_configure_dma(bp);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002467}
2468
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002469/* The hash address register is 64 bits long and takes up two
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002470 * locations in the memory map. The least significant bits are stored
2471 * in the HRB register and the most significant bits in the HRT register.
2472 *
2473 * The unicast hash enable and the multicast hash enable bits in the
2474 * network configuration register enable the reception of hash matched
2475 * frames. The destination address is reduced to a 6 bit index into
2476 * the 64 bit hash register using the following hash function. The
2477 * hash function is an exclusive or of every sixth bit of the
2478 * destination address.
2479 *
2480 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
2481 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
2482 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
2483 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
2484 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
2485 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
2486 *
2487 * da[0] represents the least significant bit of the first byte
2488 * received, that is, the multicast/unicast indicator, and da[47]
2489 * represents the most significant bit of the last byte received. If
2490 * the hash index, hi[n], points to a bit that is set in the hash
2491 * register then the frame will be matched according to whether the
2492 * frame is multicast or unicast. A multicast match will be signalled
2493 * if the multicast hash enable bit is set, da[0] is 1 and the hash
2494 * index points to a bit set in the hash register. A unicast match
2495 * will be signalled if the unicast hash enable bit is set, da[0] is 0
2496 * and the hash index points to a bit set in the hash register. To
2497 * receive all multicast frames, the hash register should be set with
2498 * all ones and the multicast hash enable bit should be set in the
2499 * network configuration register.
2500 */
2501
2502static inline int hash_bit_value(int bitnr, __u8 *addr)
2503{
2504 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
2505 return 1;
2506 return 0;
2507}
2508
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002509/* Return the hash index value for the specified address. */
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002510static int hash_get_index(__u8 *addr)
2511{
2512 int i, j, bitval;
2513 int hash_index = 0;
2514
2515 for (j = 0; j < 6; j++) {
2516 for (i = 0, bitval = 0; i < 8; i++)
Xander Huff2fa45e22015-01-15 15:55:19 -06002517 bitval ^= hash_bit_value(i * 6 + j, addr);
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002518
2519 hash_index |= (bitval << j);
2520 }
2521
2522 return hash_index;
2523}
2524
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002525/* Add multicast addresses to the internal multicast-hash table. */
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002526static void macb_sethashtable(struct net_device *dev)
2527{
Jiri Pirko22bedad32010-04-01 21:22:57 +00002528 struct netdev_hw_addr *ha;
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002529 unsigned long mc_filter[2];
Jiri Pirkof9dcbcc2010-02-23 09:19:49 +00002530 unsigned int bitnr;
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002531 struct macb *bp = netdev_priv(dev);
2532
Moritz Fischeraa50b552016-03-29 19:11:13 -07002533 mc_filter[0] = 0;
2534 mc_filter[1] = 0;
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002535
Jiri Pirko22bedad32010-04-01 21:22:57 +00002536 netdev_for_each_mc_addr(ha, dev) {
2537 bitnr = hash_get_index(ha->addr);
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002538 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
2539 }
2540
Jamie Ilesf75ba502011-11-08 10:12:32 +00002541 macb_or_gem_writel(bp, HRB, mc_filter[0]);
2542 macb_or_gem_writel(bp, HRT, mc_filter[1]);
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002543}
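/* Worked example for the two helpers above: for the IPv4 multicast address
 * 01:00:5e:00:00:01 the set bits are da[0], da[17..20], da[22] and da[40];
 * XOR-folding every sixth bit as described gives hi[5..0] = 100110b, so
 * hash_get_index() returns 38.  macb_sethashtable() then sets bit
 * (38 & 31) = 6 of mc_filter[1], which lands in the HRT register.
 */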
2544
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002545/* Enable/Disable promiscuous and multicast modes. */
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01002546static void macb_set_rx_mode(struct net_device *dev)
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002547{
2548 unsigned long cfg;
2549 struct macb *bp = netdev_priv(dev);
2550
2551 cfg = macb_readl(bp, NCFGR);
2552
Cyrille Pitchen924ec532014-07-24 13:51:01 +02002553 if (dev->flags & IFF_PROMISC) {
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002554 /* Enable promiscuous mode */
2555 cfg |= MACB_BIT(CAF);
Cyrille Pitchen924ec532014-07-24 13:51:01 +02002556
2557 /* Disable RX checksum offload */
2558 if (macb_is_gem(bp))
2559 cfg &= ~GEM_BIT(RXCOEN);
2560 } else {
2561 /* Disable promiscuous mode */
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002562 cfg &= ~MACB_BIT(CAF);
2563
Cyrille Pitchen924ec532014-07-24 13:51:01 +02002564 /* Enable RX checksum offload only if requested */
2565 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
2566 cfg |= GEM_BIT(RXCOEN);
2567 }
2568
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002569 if (dev->flags & IFF_ALLMULTI) {
2570 /* Enable all multicast mode */
Jamie Ilesf75ba502011-11-08 10:12:32 +00002571 macb_or_gem_writel(bp, HRB, -1);
2572 macb_or_gem_writel(bp, HRT, -1);
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002573 cfg |= MACB_BIT(NCFGR_MTI);
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00002574 } else if (!netdev_mc_empty(dev)) {
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002575 /* Enable specific multicasts */
2576 macb_sethashtable(dev);
2577 cfg |= MACB_BIT(NCFGR_MTI);
2578 } else if (dev->flags & (~IFF_ALLMULTI)) {
2579 /* Disable all multicast mode */
Jamie Ilesf75ba502011-11-08 10:12:32 +00002580 macb_or_gem_writel(bp, HRB, 0);
2581 macb_or_gem_writel(bp, HRT, 0);
Patrice Vilchez446ebd02007-07-12 19:07:25 +02002582 cfg &= ~MACB_BIT(NCFGR_MTI);
2583 }
2584
2585 macb_writel(bp, NCFGR, cfg);
2586}
2587
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002588static int macb_open(struct net_device *dev)
2589{
Nicolas Ferre4df95132013-06-04 21:57:12 +00002590 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
Antoine Tenart7897b072019-11-13 10:00:06 +01002591 struct macb *bp = netdev_priv(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002592 struct macb_queue *queue;
2593 unsigned int q;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002594 int err;
2595
Jamie Ilesc220f8c2011-03-08 20:27:08 +00002596 netdev_dbg(bp->dev, "open\n");
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002597
Harini Katakamd54f89a2019-03-01 16:20:34 +05302598 err = pm_runtime_get_sync(&bp->pdev->dev);
2599 if (err < 0)
2600 goto pm_exit;
2601
Nicolas Ferre1b447912013-06-04 21:57:11 +00002602 /* RX buffers initialization */
Nicolas Ferre4df95132013-06-04 21:57:12 +00002603 macb_init_rx_buffer_size(bp, bufsz);
Nicolas Ferre1b447912013-06-04 21:57:11 +00002604
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002605 err = macb_alloc_consistent(bp);
2606 if (err) {
Jamie Ilesc220f8c2011-03-08 20:27:08 +00002607 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
2608 err);
Harini Katakamd54f89a2019-03-01 16:20:34 +05302609 goto pm_exit;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002610 }
2611
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002612 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2613 napi_enable(&queue->napi);
2614
Harini Katakam05044532019-05-07 19:59:10 +05302615 macb_init_hw(bp);
2616
Antoine Tenart7897b072019-11-13 10:00:06 +01002617 err = macb_phylink_connect(bp);
2618 if (err)
Claudiu Bezneafaa620872020-06-18 11:37:40 +03002619 goto reset_hw;
frederic RODO6c36a702007-07-12 19:07:24 +02002620
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002621 netif_tx_start_all_queues(dev);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002622
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02002623 if (bp->ptp_info)
2624 bp->ptp_info->ptp_init(dev);
2625
Charles Keepax939a5bf72020-06-15 14:18:54 +01002626 return 0;
2627
Claudiu Bezneafaa620872020-06-18 11:37:40 +03002628reset_hw:
2629 macb_reset_hw(bp);
Corentin Labbe014406b2020-06-10 09:53:44 +00002630 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2631 napi_disable(&queue->napi);
Claudiu Bezneafaa620872020-06-18 11:37:40 +03002632 macb_free_consistent(bp);
Harini Katakamd54f89a2019-03-01 16:20:34 +05302633pm_exit:
Charles Keepax939a5bf72020-06-15 14:18:54 +01002634 pm_runtime_put_sync(&bp->pdev->dev);
2635 return err;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002636}
2637
2638static int macb_close(struct net_device *dev)
2639{
2640 struct macb *bp = netdev_priv(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002641 struct macb_queue *queue;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002642 unsigned long flags;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002643 unsigned int q;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002644
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002645 netif_tx_stop_all_queues(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00002646
2647 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2648 napi_disable(&queue->napi);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002649
Antoine Tenart7897b072019-11-13 10:00:06 +01002650 phylink_stop(bp->phylink);
2651 phylink_disconnect_phy(bp->phylink);
frederic RODO6c36a702007-07-12 19:07:24 +02002652
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002653 spin_lock_irqsave(&bp->lock, flags);
2654 macb_reset_hw(bp);
2655 netif_carrier_off(dev);
2656 spin_unlock_irqrestore(&bp->lock, flags);
2657
2658 macb_free_consistent(bp);
2659
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02002660 if (bp->ptp_info)
2661 bp->ptp_info->ptp_remove(dev);
2662
Harini Katakamd54f89a2019-03-01 16:20:34 +05302663 pm_runtime_put(&bp->pdev->dev);
2664
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002665 return 0;
2666}
2667
Harini Katakama5898ea2015-05-06 22:27:18 +05302668static int macb_change_mtu(struct net_device *dev, int new_mtu)
2669{
Harini Katakama5898ea2015-05-06 22:27:18 +05302670 if (netif_running(dev))
2671 return -EBUSY;
2672
Harini Katakama5898ea2015-05-06 22:27:18 +05302673 dev->mtu = new_mtu;
2674
2675 return 0;
2676}
2677
Jamie Ilesa494ed82011-03-09 16:26:35 +00002678static void gem_update_stats(struct macb *bp)
2679{
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002680 struct macb_queue *queue;
2681 unsigned int i, q, idx;
2682 unsigned long *stat;
2683
Jamie Ilesa494ed82011-03-09 16:26:35 +00002684 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
Jamie Ilesa494ed82011-03-09 16:26:35 +00002685
Xander Huff3ff13f12015-01-13 16:15:51 -06002686 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
2687 u32 offset = gem_statistics[i].offset;
David S. Miller7a6e0702015-07-27 14:24:48 -07002688 u64 val = bp->macb_reg_readl(bp, offset);
Xander Huff3ff13f12015-01-13 16:15:51 -06002689
2690 bp->ethtool_stats[i] += val;
2691 *p += val;
2692
2693 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
2694 /* Add GEM_OCTTXH, GEM_OCTRXH */
David S. Miller7a6e0702015-07-27 14:24:48 -07002695 val = bp->macb_reg_readl(bp, offset + 4);
Xander Huff2fa45e22015-01-15 15:55:19 -06002696 bp->ethtool_stats[i] += ((u64)val) << 32;
Xander Huff3ff13f12015-01-13 16:15:51 -06002697 *(++p) += val;
2698 }
2699 }
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002700
2701 idx = GEM_STATS_LEN;
2702 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2703 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
2704 bp->ethtool_stats[idx++] = *stat;
Jamie Ilesa494ed82011-03-09 16:26:35 +00002705}
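/* Worked example (register values assumed): if GEM_OCTTXL reads 0xffffffff
 * and the adjacent GEM_OCTTXH reads 0x2, the loop above adds 0xffffffff and
 * then (u64)0x2 << 32, advancing the 64-bit ethtool counter by 0x2ffffffff
 * octets while the two consecutive u32 hw_stats words keep the low and high
 * halves separately.
 */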
2706
2707static struct net_device_stats *gem_get_stats(struct macb *bp)
2708{
2709 struct gem_stats *hwstat = &bp->hw_stats.gem;
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02002710 struct net_device_stats *nstat = &bp->dev->stats;
Jamie Ilesa494ed82011-03-09 16:26:35 +00002711
2712 gem_update_stats(bp);
2713
2714 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
2715 hwstat->rx_alignment_errors +
2716 hwstat->rx_resource_errors +
2717 hwstat->rx_overruns +
2718 hwstat->rx_oversize_frames +
2719 hwstat->rx_jabbers +
2720 hwstat->rx_undersized_frames +
2721 hwstat->rx_length_field_frame_errors);
2722 nstat->tx_errors = (hwstat->tx_late_collisions +
2723 hwstat->tx_excessive_collisions +
2724 hwstat->tx_underrun +
2725 hwstat->tx_carrier_sense_errors);
2726 nstat->multicast = hwstat->rx_multicast_frames;
2727 nstat->collisions = (hwstat->tx_single_collision_frames +
2728 hwstat->tx_multiple_collision_frames +
2729 hwstat->tx_excessive_collisions);
2730 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
2731 hwstat->rx_jabbers +
2732 hwstat->rx_undersized_frames +
2733 hwstat->rx_length_field_frame_errors);
2734 nstat->rx_over_errors = hwstat->rx_resource_errors;
2735 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
2736 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
2737 nstat->rx_fifo_errors = hwstat->rx_overruns;
2738 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
2739 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
2740 nstat->tx_fifo_errors = hwstat->tx_underrun;
2741
2742 return nstat;
2743}
2744
Xander Huff3ff13f12015-01-13 16:15:51 -06002745static void gem_get_ethtool_stats(struct net_device *dev,
2746 struct ethtool_stats *stats, u64 *data)
2747{
2748 struct macb *bp;
2749
2750 bp = netdev_priv(dev);
2751 gem_update_stats(bp);
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002752 memcpy(data, &bp->ethtool_stats, sizeof(u64)
2753 * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
Xander Huff3ff13f12015-01-13 16:15:51 -06002754}
2755
2756static int gem_get_sset_count(struct net_device *dev, int sset)
2757{
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002758 struct macb *bp = netdev_priv(dev);
2759
Xander Huff3ff13f12015-01-13 16:15:51 -06002760 switch (sset) {
2761 case ETH_SS_STATS:
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002762 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
Xander Huff3ff13f12015-01-13 16:15:51 -06002763 default:
2764 return -EOPNOTSUPP;
2765 }
2766}
2767
2768static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
2769{
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002770 char stat_string[ETH_GSTRING_LEN];
2771 struct macb *bp = netdev_priv(dev);
2772 struct macb_queue *queue;
Andy Shevchenko8bcbf822015-07-24 21:24:02 +03002773 unsigned int i;
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002774 unsigned int q;
Xander Huff3ff13f12015-01-13 16:15:51 -06002775
2776 switch (sset) {
2777 case ETH_SS_STATS:
2778 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
2779 memcpy(p, gem_statistics[i].stat_string,
2780 ETH_GSTRING_LEN);
Rafal Ozieblo512286b2017-11-30 18:19:56 +00002781
2782 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2783 for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
2784 snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
2785 q, queue_statistics[i].stat_string);
2786 memcpy(p, stat_string, ETH_GSTRING_LEN);
2787 }
2788 }
Xander Huff3ff13f12015-01-13 16:15:51 -06002789 break;
2790 }
2791}
2792
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01002793static struct net_device_stats *macb_get_stats(struct net_device *dev)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002794{
2795 struct macb *bp = netdev_priv(dev);
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02002796 struct net_device_stats *nstat = &bp->dev->stats;
Jamie Ilesa494ed82011-03-09 16:26:35 +00002797 struct macb_stats *hwstat = &bp->hw_stats.macb;
2798
2799 if (macb_is_gem(bp))
2800 return gem_get_stats(bp);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002801
frederic RODO6c36a702007-07-12 19:07:24 +02002802 /* read stats from hardware */
2803 macb_update_stats(bp);
2804
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002805 /* Convert HW stats into netdevice stats */
2806 nstat->rx_errors = (hwstat->rx_fcs_errors +
2807 hwstat->rx_align_errors +
2808 hwstat->rx_resource_errors +
2809 hwstat->rx_overruns +
2810 hwstat->rx_oversize_pkts +
2811 hwstat->rx_jabbers +
2812 hwstat->rx_undersize_pkts +
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002813 hwstat->rx_length_mismatch);
2814 nstat->tx_errors = (hwstat->tx_late_cols +
2815 hwstat->tx_excessive_cols +
2816 hwstat->tx_underruns +
Wolfgang Steinwender716723c2015-04-10 11:42:56 +02002817 hwstat->tx_carrier_errors +
2818 hwstat->sqe_test_errors);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002819 nstat->collisions = (hwstat->tx_single_cols +
2820 hwstat->tx_multiple_cols +
2821 hwstat->tx_excessive_cols);
2822 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
2823 hwstat->rx_jabbers +
2824 hwstat->rx_undersize_pkts +
2825 hwstat->rx_length_mismatch);
Alexander Steinb19f7f72011-04-13 05:03:24 +00002826 nstat->rx_over_errors = hwstat->rx_resource_errors +
2827 hwstat->rx_overruns;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01002828 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
2829 nstat->rx_frame_errors = hwstat->rx_align_errors;
2830 nstat->rx_fifo_errors = hwstat->rx_overruns;
2831 /* XXX: What does "missed" mean? */
2832 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
2833 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
2834 nstat->tx_fifo_errors = hwstat->tx_underruns;
2835 /* Don't know about heartbeat or window errors... */
2836
2837 return nstat;
2838}
2839
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002840static int macb_get_regs_len(struct net_device *netdev)
2841{
2842 return MACB_GREGS_NBR * sizeof(u32);
2843}
2844
2845static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
2846 void *p)
2847{
2848 struct macb *bp = netdev_priv(dev);
2849 unsigned int tail, head;
2850 u32 *regs_buff = p;
2851
2852 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
2853 | MACB_GREGS_VERSION;
2854
Zach Brownb410d132016-10-19 09:56:57 -05002855 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
2856 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002857
2858 regs_buff[0] = macb_readl(bp, NCR);
2859 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
2860 regs_buff[2] = macb_readl(bp, NSR);
2861 regs_buff[3] = macb_readl(bp, TSR);
2862 regs_buff[4] = macb_readl(bp, RBQP);
2863 regs_buff[5] = macb_readl(bp, TBQP);
2864 regs_buff[6] = macb_readl(bp, RSR);
2865 regs_buff[7] = macb_readl(bp, IMR);
2866
2867 regs_buff[8] = tail;
2868 regs_buff[9] = head;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01002869 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
2870 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002871
Neil Armstrongce721a72016-01-05 14:39:16 +01002872 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
2873 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
Moritz Fischer64ec42f2016-03-29 19:11:12 -07002874 if (macb_is_gem(bp))
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002875 regs_buff[13] = gem_readl(bp, DMACFG);
Nicolas Ferred1d1b532012-10-31 06:04:56 +00002876}
2877
Sergio Prado3e2a5e12016-02-09 12:07:16 -02002878static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2879{
2880 struct macb *bp = netdev_priv(netdev);
2881
Nicolas Ferre253fe092020-07-10 14:46:43 +02002882 if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) {
Antoine Tenart7897b072019-11-13 10:00:06 +01002883 phylink_ethtool_get_wol(bp->phylink, wol);
Nicolas Ferre253fe092020-07-10 14:46:43 +02002884 wol->supported |= WAKE_MAGIC;
2885
2886 if (bp->wol & MACB_WOL_ENABLED)
2887 wol->wolopts |= WAKE_MAGIC;
2888 }
Sergio Prado3e2a5e12016-02-09 12:07:16 -02002889}
2890
2891static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
2892{
2893 struct macb *bp = netdev_priv(netdev);
Antoine Tenart7897b072019-11-13 10:00:06 +01002894 int ret;
2895
Nicolas Ferre253fe092020-07-10 14:46:43 +02002896	/* Pass the request to the phylink layer first */
Antoine Tenart7897b072019-11-13 10:00:06 +01002897 ret = phylink_ethtool_set_wol(bp->phylink, wol);
Nicolas Ferre253fe092020-07-10 14:46:43 +02002898 /* Don't manage WoL on MAC if handled by the PHY
2899 * or if there's a failure in talking to the PHY
2900 */
2901 if (!ret || ret != -EOPNOTSUPP)
2902 return ret;
Sergio Prado3e2a5e12016-02-09 12:07:16 -02002903
2904 if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) ||
2905 (wol->wolopts & ~WAKE_MAGIC))
2906 return -EOPNOTSUPP;
2907
2908 if (wol->wolopts & WAKE_MAGIC)
2909 bp->wol |= MACB_WOL_ENABLED;
2910 else
2911 bp->wol &= ~MACB_WOL_ENABLED;
2912
2913 device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED);
2914
2915 return 0;
2916}
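/* Usage sketch (standard ethtool command, shown for illustration):
 * "ethtool -s eth0 wol g" arrives here with wol->wolopts = WAKE_MAGIC.
 * If neither the PHY nor the MAC advertises magic-packet support, the
 * request fails with -EOPNOTSUPP; otherwise MACB_WOL_ENABLED is latched
 * and the wakeup source is armed via device_set_wakeup_enable().
 */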
2917
Antoine Tenart7897b072019-11-13 10:00:06 +01002918static int macb_get_link_ksettings(struct net_device *netdev,
2919 struct ethtool_link_ksettings *kset)
2920{
2921 struct macb *bp = netdev_priv(netdev);
2922
2923 return phylink_ethtool_ksettings_get(bp->phylink, kset);
2924}
2925
2926static int macb_set_link_ksettings(struct net_device *netdev,
2927 const struct ethtool_link_ksettings *kset)
2928{
2929 struct macb *bp = netdev_priv(netdev);
2930
2931 return phylink_ethtool_ksettings_set(bp->phylink, kset);
2932}
2933
Zach Brown8441bb32016-10-19 09:56:58 -05002934static void macb_get_ringparam(struct net_device *netdev,
2935 struct ethtool_ringparam *ring)
2936{
2937 struct macb *bp = netdev_priv(netdev);
2938
2939 ring->rx_max_pending = MAX_RX_RING_SIZE;
2940 ring->tx_max_pending = MAX_TX_RING_SIZE;
2941
2942 ring->rx_pending = bp->rx_ring_size;
2943 ring->tx_pending = bp->tx_ring_size;
2944}
2945
2946static int macb_set_ringparam(struct net_device *netdev,
2947 struct ethtool_ringparam *ring)
2948{
2949 struct macb *bp = netdev_priv(netdev);
2950 u32 new_rx_size, new_tx_size;
2951 unsigned int reset = 0;
2952
2953 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
2954 return -EINVAL;
2955
2956 new_rx_size = clamp_t(u32, ring->rx_pending,
2957 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
2958 new_rx_size = roundup_pow_of_two(new_rx_size);
2959
2960 new_tx_size = clamp_t(u32, ring->tx_pending,
2961 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
2962 new_tx_size = roundup_pow_of_two(new_tx_size);
2963
2964 if ((new_tx_size == bp->tx_ring_size) &&
2965 (new_rx_size == bp->rx_ring_size)) {
2966 /* nothing to do */
2967 return 0;
2968 }
2969
2970 if (netif_running(bp->dev)) {
2971 reset = 1;
2972 macb_close(bp->dev);
2973 }
2974
2975 bp->rx_ring_size = new_rx_size;
2976 bp->tx_ring_size = new_tx_size;
2977
2978 if (reset)
2979 macb_open(bp->dev);
2980
2981 return 0;
2982}
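/* Usage sketch (standard ethtool command, values illustrative): a request
 * such as "ethtool -G eth0 rx 1000 tx 100" lands here with rx_pending =
 * 1000 and tx_pending = 100; clamp_t() bounds both values and
 * roundup_pow_of_two() turns them into 1024 and 128 descriptors, closing
 * and reopening the interface only if it was running.
 */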
2983
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01002984#ifdef CONFIG_MACB_USE_HWSTAMP
2985static unsigned int gem_get_tsu_rate(struct macb *bp)
2986{
2987 struct clk *tsu_clk;
2988 unsigned int tsu_rate;
2989
2990 tsu_clk = devm_clk_get(&bp->pdev->dev, "tsu_clk");
2991 if (!IS_ERR(tsu_clk))
2992 tsu_rate = clk_get_rate(tsu_clk);
2993 /* try pclk instead */
2994 else if (!IS_ERR(bp->pclk)) {
2995 tsu_clk = bp->pclk;
2996 tsu_rate = clk_get_rate(tsu_clk);
2997 } else
2998 return -ENOTSUPP;
2999 return tsu_rate;
3000}
3001
3002static s32 gem_get_ptp_max_adj(void)
3003{
3004 return 64000000;
3005}
3006
3007static int gem_get_ts_info(struct net_device *dev,
3008 struct ethtool_ts_info *info)
3009{
3010 struct macb *bp = netdev_priv(dev);
3011
3012 if ((bp->hw_dma_cap & HW_DMA_CAP_PTP) == 0) {
3013 ethtool_op_get_ts_info(dev, info);
3014 return 0;
3015 }
3016
3017 info->so_timestamping =
3018 SOF_TIMESTAMPING_TX_SOFTWARE |
3019 SOF_TIMESTAMPING_RX_SOFTWARE |
3020 SOF_TIMESTAMPING_SOFTWARE |
3021 SOF_TIMESTAMPING_TX_HARDWARE |
3022 SOF_TIMESTAMPING_RX_HARDWARE |
3023 SOF_TIMESTAMPING_RAW_HARDWARE;
3024 info->tx_types =
3025 (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
3026 (1 << HWTSTAMP_TX_OFF) |
3027 (1 << HWTSTAMP_TX_ON);
3028 info->rx_filters =
3029 (1 << HWTSTAMP_FILTER_NONE) |
3030 (1 << HWTSTAMP_FILTER_ALL);
3031
3032 info->phc_index = bp->ptp_clock ? ptp_clock_index(bp->ptp_clock) : -1;
3033
3034 return 0;
3035}
3036
3037static struct macb_ptp_info gem_ptp_info = {
3038 .ptp_init = gem_ptp_init,
3039 .ptp_remove = gem_ptp_remove,
3040 .get_ptp_max_adj = gem_get_ptp_max_adj,
3041 .get_tsu_rate = gem_get_tsu_rate,
3042 .get_ts_info = gem_get_ts_info,
3043 .get_hwtst = gem_get_hwtst,
3044 .set_hwtst = gem_set_hwtst,
3045};
3046#endif
3047
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02003048static int macb_get_ts_info(struct net_device *netdev,
3049 struct ethtool_ts_info *info)
3050{
3051 struct macb *bp = netdev_priv(netdev);
3052
3053 if (bp->ptp_info)
3054 return bp->ptp_info->get_ts_info(netdev, info);
3055
3056 return ethtool_op_get_ts_info(netdev, info);
3057}
3058
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003059static void gem_enable_flow_filters(struct macb *bp, bool enable)
3060{
Claudiu Bezneac1e85c6c2019-05-22 08:24:43 +00003061 struct net_device *netdev = bp->dev;
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003062 struct ethtool_rx_fs_item *item;
3063 u32 t2_scr;
3064 int num_t2_scr;
3065
Claudiu Bezneac1e85c6c2019-05-22 08:24:43 +00003066 if (!(netdev->features & NETIF_F_NTUPLE))
3067 return;
3068
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003069 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
3070
3071 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3072 struct ethtool_rx_flow_spec *fs = &item->fs;
3073 struct ethtool_tcpip4_spec *tp4sp_m;
3074
3075 if (fs->location >= num_t2_scr)
3076 continue;
3077
3078 t2_scr = gem_readl_n(bp, SCRT2, fs->location);
3079
3080 /* enable/disable screener regs for the flow entry */
3081 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
3082
3083 /* only enable fields with no masking */
3084 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3085
3086 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
3087 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
3088 else
3089 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
3090
3091 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
3092 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
3093 else
3094 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
3095
3096 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
3097 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
3098 else
3099 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
3100
3101 gem_writel_n(bp, SCRT2, fs->location, t2_scr);
3102 }
3103}
3104
3105static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
3106{
3107 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
3108 uint16_t index = fs->location;
3109 u32 w0, w1, t2_scr;
3110 bool cmp_a = false;
3111 bool cmp_b = false;
3112 bool cmp_c = false;
3113
3114 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
3115 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3116
3117	/* use this field only when it is fully unmasked (exact match) */
3118 if (tp4sp_m->ip4src == 0xFFFFFFFF) {
3119 /* 1st compare reg - IP source address */
3120 w0 = 0;
3121 w1 = 0;
3122 w0 = tp4sp_v->ip4src;
3123 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3124 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3125 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
3126 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
3127 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
3128 cmp_a = true;
3129 }
3130
3131	/* use this field only when it is fully unmasked (exact match) */
3132 if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
3133 /* 2nd compare reg - IP destination address */
3134 w0 = 0;
3135 w1 = 0;
3136 w0 = tp4sp_v->ip4dst;
3137 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3138 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3139 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
3140 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
3141 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
3142 cmp_b = true;
3143 }
3144
3145	/* use the port fields only when at least one is fully unmasked */
3146 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
3147 /* 3rd compare reg - source port, destination port */
3148 w0 = 0;
3149 w1 = 0;
3150 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
3151 if (tp4sp_m->psrc == tp4sp_m->pdst) {
3152 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
3153 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3154 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3155 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3156 } else {
3157 /* only one port definition */
3158 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
3159 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
3160 if (tp4sp_m->psrc == 0xFFFF) { /* src port */
3161 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
3162 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3163 } else { /* dst port */
3164 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3165 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
3166 }
3167 }
3168 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
3169 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
3170 cmp_c = true;
3171 }
3172
3173 t2_scr = 0;
3174 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
3175 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
3176 if (cmp_a)
3177 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
3178 if (cmp_b)
3179 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
3180 if (cmp_c)
3181 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
3182 gem_writel_n(bp, SCRT2, index, t2_scr);
3183}
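/* Usage sketch (standard ethtool ntuple syntax, addresses illustrative):
 * "ethtool -N eth0 flow-type tcp4 src-ip 192.168.0.10 dst-port 5001 \
 *  action 1 loc 0"
 * reaches gem_add_flow_filter() below, which programs the type-2 screener
 * and compare registers via gem_prog_cmp_regs() so matching frames are
 * steered to RX queue 1.
 */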
3184
3185static int gem_add_flow_filter(struct net_device *netdev,
3186 struct ethtool_rxnfc *cmd)
3187{
3188 struct macb *bp = netdev_priv(netdev);
3189 struct ethtool_rx_flow_spec *fs = &cmd->fs;
3190 struct ethtool_rx_fs_item *item, *newfs;
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003191 unsigned long flags;
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003192 int ret = -EINVAL;
3193 bool added = false;
3194
Julia Cartwrightcc1674e2017-12-05 18:02:50 -06003195 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003196 if (newfs == NULL)
3197 return -ENOMEM;
3198 memcpy(&newfs->fs, fs, sizeof(newfs->fs));
3199
3200 netdev_dbg(netdev,
3201 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3202 fs->flow_type, (int)fs->ring_cookie, fs->location,
3203 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3204 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3205 htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
3206
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003207 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3208
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003209 /* find correct place to add in list */
Julia Cartwrighta3da8ad2017-12-05 18:02:48 -06003210 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3211 if (item->fs.location > newfs->fs.location) {
3212 list_add_tail(&newfs->list, &item->list);
3213 added = true;
3214 break;
3215 } else if (item->fs.location == fs->location) {
3216 netdev_err(netdev, "Rule not added: location %d not free!\n",
3217 fs->location);
3218 ret = -EBUSY;
3219 goto err;
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003220 }
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003221 }
Julia Cartwrighta3da8ad2017-12-05 18:02:48 -06003222 if (!added)
3223 list_add_tail(&newfs->list, &bp->rx_fs_list.list);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003224
3225 gem_prog_cmp_regs(bp, fs);
3226 bp->rx_fs_list.count++;
3227 /* enable filtering if NTUPLE on */
Claudiu Bezneac1e85c6c2019-05-22 08:24:43 +00003228 gem_enable_flow_filters(bp, 1);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003229
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003230 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003231 return 0;
3232
3233err:
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003234 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003235 kfree(newfs);
3236 return ret;
3237}
3238
3239static int gem_del_flow_filter(struct net_device *netdev,
3240 struct ethtool_rxnfc *cmd)
3241{
3242 struct macb *bp = netdev_priv(netdev);
3243 struct ethtool_rx_fs_item *item;
3244 struct ethtool_rx_flow_spec *fs;
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003245 unsigned long flags;
3246
3247 spin_lock_irqsave(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003248
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003249 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3250 if (item->fs.location == cmd->fs.location) {
3251 /* disable screener regs for the flow entry */
3252 fs = &(item->fs);
3253 netdev_dbg(netdev,
3254 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3255 fs->flow_type, (int)fs->ring_cookie, fs->location,
3256 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3257 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3258 htons(fs->h_u.tcp_ip4_spec.psrc),
3259 htons(fs->h_u.tcp_ip4_spec.pdst));
3260
3261 gem_writel_n(bp, SCRT2, fs->location, 0);
3262
3263 list_del(&item->list);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003264 bp->rx_fs_list.count--;
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003265 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3266 kfree(item);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003267 return 0;
3268 }
3269 }
3270
Julia Cartwright7038cdb2017-12-05 18:02:49 -06003271 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003272 return -EINVAL;
3273}
3274
3275static int gem_get_flow_entry(struct net_device *netdev,
3276 struct ethtool_rxnfc *cmd)
3277{
3278 struct macb *bp = netdev_priv(netdev);
3279 struct ethtool_rx_fs_item *item;
3280
3281 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3282 if (item->fs.location == cmd->fs.location) {
3283 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
3284 return 0;
3285 }
3286 }
3287 return -EINVAL;
3288}
3289
3290static int gem_get_all_flow_entries(struct net_device *netdev,
3291 struct ethtool_rxnfc *cmd, u32 *rule_locs)
3292{
3293 struct macb *bp = netdev_priv(netdev);
3294 struct ethtool_rx_fs_item *item;
3295 uint32_t cnt = 0;
3296
3297 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3298 if (cnt == cmd->rule_cnt)
3299 return -EMSGSIZE;
3300 rule_locs[cnt] = item->fs.location;
3301 cnt++;
3302 }
3303 cmd->data = bp->max_tuples;
3304 cmd->rule_cnt = cnt;
3305
3306 return 0;
3307}
3308
3309static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
3310 u32 *rule_locs)
3311{
3312 struct macb *bp = netdev_priv(netdev);
3313 int ret = 0;
3314
3315 switch (cmd->cmd) {
3316 case ETHTOOL_GRXRINGS:
3317 cmd->data = bp->num_queues;
3318 break;
3319 case ETHTOOL_GRXCLSRLCNT:
3320 cmd->rule_cnt = bp->rx_fs_list.count;
3321 break;
3322 case ETHTOOL_GRXCLSRULE:
3323 ret = gem_get_flow_entry(netdev, cmd);
3324 break;
3325 case ETHTOOL_GRXCLSRLALL:
3326 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
3327 break;
3328 default:
3329 netdev_err(netdev,
3330 "Command parameter %d is not supported\n", cmd->cmd);
3331 ret = -EOPNOTSUPP;
3332 }
3333
3334 return ret;
3335}
3336
3337static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
3338{
3339 struct macb *bp = netdev_priv(netdev);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003340 int ret;
3341
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003342 switch (cmd->cmd) {
3343 case ETHTOOL_SRXCLSRLINS:
3344 if ((cmd->fs.location >= bp->max_tuples)
3345 || (cmd->fs.ring_cookie >= bp->num_queues)) {
3346 ret = -EINVAL;
3347 break;
3348 }
3349 ret = gem_add_flow_filter(netdev, cmd);
3350 break;
3351 case ETHTOOL_SRXCLSRLDEL:
3352 ret = gem_del_flow_filter(netdev, cmd);
3353 break;
3354 default:
3355 netdev_err(netdev,
3356 "Command parameter %d is not supported\n", cmd->cmd);
3357 ret = -EOPNOTSUPP;
3358 }
3359
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003360 return ret;
3361}
3362
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003363static const struct ethtool_ops macb_ethtool_ops = {
Nicolas Ferred1d1b532012-10-31 06:04:56 +00003364 .get_regs_len = macb_get_regs_len,
3365 .get_regs = macb_get_regs,
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003366 .get_link = ethtool_op_get_link,
Richard Cochran17f393e2012-04-03 22:59:31 +00003367 .get_ts_info = ethtool_op_get_ts_info,
Sergio Prado3e2a5e12016-02-09 12:07:16 -02003368 .get_wol = macb_get_wol,
3369 .set_wol = macb_set_wol,
Antoine Tenart7897b072019-11-13 10:00:06 +01003370 .get_link_ksettings = macb_get_link_ksettings,
3371 .set_link_ksettings = macb_set_link_ksettings,
Zach Brown8441bb32016-10-19 09:56:58 -05003372 .get_ringparam = macb_get_ringparam,
3373 .set_ringparam = macb_set_ringparam,
Xander Huff8cd5a562015-01-15 15:55:20 -06003374};
Xander Huff8cd5a562015-01-15 15:55:20 -06003375
Lad, Prabhakar8093b1c2015-02-05 16:21:07 +00003376static const struct ethtool_ops gem_ethtool_ops = {
Xander Huff8cd5a562015-01-15 15:55:20 -06003377 .get_regs_len = macb_get_regs_len,
3378 .get_regs = macb_get_regs,
Nicolas Ferre558e35c2020-07-20 10:56:52 +02003379 .get_wol = macb_get_wol,
3380 .set_wol = macb_set_wol,
Xander Huff8cd5a562015-01-15 15:55:20 -06003381 .get_link = ethtool_op_get_link,
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02003382 .get_ts_info = macb_get_ts_info,
Xander Huff3ff13f12015-01-13 16:15:51 -06003383 .get_ethtool_stats = gem_get_ethtool_stats,
3384 .get_strings = gem_get_ethtool_strings,
3385 .get_sset_count = gem_get_sset_count,
Antoine Tenart7897b072019-11-13 10:00:06 +01003386 .get_link_ksettings = macb_get_link_ksettings,
3387 .set_link_ksettings = macb_set_link_ksettings,
Zach Brown8441bb32016-10-19 09:56:58 -05003388 .get_ringparam = macb_get_ringparam,
3389 .set_ringparam = macb_set_ringparam,
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003390 .get_rxnfc = gem_get_rxnfc,
3391 .set_rxnfc = gem_set_rxnfc,
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003392};
3393
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003394static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003395{
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02003396 struct macb *bp = netdev_priv(dev);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003397
3398 if (!netif_running(dev))
3399 return -EINVAL;
3400
Antoine Tenart7897b072019-11-13 10:00:06 +01003401 if (bp->ptp_info) {
3402 switch (cmd) {
3403 case SIOCSHWTSTAMP:
3404 return bp->ptp_info->set_hwtst(dev, rq, cmd);
3405 case SIOCGHWTSTAMP:
3406 return bp->ptp_info->get_hwtst(dev, rq);
3407 }
Andrei.Pistirica@microchip.comc2594d82017-01-19 17:56:15 +02003408 }
Antoine Tenart7897b072019-11-13 10:00:06 +01003409
3410 return phylink_mii_ioctl(bp->phylink, rq, cmd);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003411}
3412
Claudiu Bezneac1e85c6c2019-05-22 08:24:43 +00003413static inline void macb_set_txcsum_feature(struct macb *bp,
3414 netdev_features_t features)
3415{
3416 u32 val;
3417
3418 if (!macb_is_gem(bp))
3419 return;
3420
3421 val = gem_readl(bp, DMACFG);
3422 if (features & NETIF_F_HW_CSUM)
3423 val |= GEM_BIT(TXCOEN);
3424 else
3425 val &= ~GEM_BIT(TXCOEN);
3426
3427 gem_writel(bp, DMACFG, val);
3428}
3429
3430static inline void macb_set_rxcsum_feature(struct macb *bp,
3431 netdev_features_t features)
3432{
3433 struct net_device *netdev = bp->dev;
3434 u32 val;
3435
3436 if (!macb_is_gem(bp))
3437 return;
3438
3439 val = gem_readl(bp, NCFGR);
3440 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
3441 val |= GEM_BIT(RXCOEN);
3442 else
3443 val &= ~GEM_BIT(RXCOEN);
3444
3445 gem_writel(bp, NCFGR, val);
3446}
3447
3448static inline void macb_set_rxflow_feature(struct macb *bp,
3449 netdev_features_t features)
3450{
3451 if (!macb_is_gem(bp))
3452 return;
3453
3454 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
3455}
3456
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003457static int macb_set_features(struct net_device *netdev,
3458 netdev_features_t features)
3459{
3460 struct macb *bp = netdev_priv(netdev);
3461 netdev_features_t changed = features ^ netdev->features;
3462
3463 /* TX checksum offload */
Claudiu Bezneac1e85c6c2019-05-22 08:24:43 +00003464 if (changed & NETIF_F_HW_CSUM)
3465 macb_set_txcsum_feature(bp, features);
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003466
Cyrille Pitchen924ec532014-07-24 13:51:01 +02003467 /* RX checksum offload */
Claudiu Bezneac1e85c6c2019-05-22 08:24:43 +00003468 if (changed & NETIF_F_RXCSUM)
3469 macb_set_rxcsum_feature(bp, features);
Cyrille Pitchen924ec532014-07-24 13:51:01 +02003470
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003471 /* RX Flow Filters */
Claudiu Bezneac1e85c6c2019-05-22 08:24:43 +00003472 if (changed & NETIF_F_NTUPLE)
3473 macb_set_rxflow_feature(bp, features);
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003474
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003475 return 0;
3476}
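/* Usage sketch (standard ethtool command): toggling an offload at runtime,
 * e.g. "ethtool -K eth0 rx off ntuple on", flows through the handler above;
 * only the feature bits that actually changed are propagated to the
 * TXCOEN/RXCOEN bits and the flow-filter screeners.
 */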
3477
Claudiu Bezneac1e85c6c2019-05-22 08:24:43 +00003478static void macb_restore_features(struct macb *bp)
3479{
3480 struct net_device *netdev = bp->dev;
3481 netdev_features_t features = netdev->features;
3482
3483 /* TX checksum offload */
3484 macb_set_txcsum_feature(bp, features);
3485
3486 /* RX checksum offload */
3487 macb_set_rxcsum_feature(bp, features);
3488
3489 /* RX Flow Filters */
3490 macb_set_rxflow_feature(bp, features);
3491}
3492
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003493static const struct net_device_ops macb_netdev_ops = {
3494 .ndo_open = macb_open,
3495 .ndo_stop = macb_close,
3496 .ndo_start_xmit = macb_start_xmit,
Jiri Pirkoafc4b132011-08-16 06:29:01 +00003497 .ndo_set_rx_mode = macb_set_rx_mode,
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003498 .ndo_get_stats = macb_get_stats,
3499 .ndo_do_ioctl = macb_ioctl,
3500 .ndo_validate_addr = eth_validate_addr,
Harini Katakama5898ea2015-05-06 22:27:18 +05303501 .ndo_change_mtu = macb_change_mtu,
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003502 .ndo_set_mac_address = eth_mac_addr,
Thomas Petazzoni6e8cf5c2009-05-04 11:08:41 -07003503#ifdef CONFIG_NET_POLL_CONTROLLER
3504 .ndo_poll_controller = macb_poll_controller,
3505#endif
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003506 .ndo_set_features = macb_set_features,
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00003507 .ndo_features_check = macb_features_check,
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003508};
3509
Moritz Fischer64ec42f2016-03-29 19:11:12 -07003510/* Configure peripheral capabilities according to device tree
Nicolas Ferree1755872014-07-24 13:50:58 +02003511 * and integration options used
3512 */
Moritz Fischer64ec42f2016-03-29 19:11:12 -07003513static void macb_configure_caps(struct macb *bp,
3514 const struct macb_config *dt_conf)
Nicolas Ferree1755872014-07-24 13:50:58 +02003515{
3516 u32 dcfg;
Nicolas Ferree1755872014-07-24 13:50:58 +02003517
Nicolas Ferref6970502015-03-31 15:02:01 +02003518 if (dt_conf)
3519 bp->caps = dt_conf->caps;
3520
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03003521 if (hw_is_gem(bp->regs, bp->native_io)) {
Nicolas Ferree1755872014-07-24 13:50:58 +02003522 bp->caps |= MACB_CAPS_MACB_IS_GEM;
3523
Nicolas Ferree1755872014-07-24 13:50:58 +02003524 dcfg = gem_readl(bp, DCFG1);
3525 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
3526 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
3527 dcfg = gem_readl(bp, DCFG2);
3528 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
3529 bp->caps |= MACB_CAPS_FIFO_MODE;
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01003530#ifdef CONFIG_MACB_USE_HWSTAMP
3531 if (gem_has_ptp(bp)) {
Rafal Ozieblo7b429612017-06-29 07:12:51 +01003532 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
Antoine Tenart7897b072019-11-13 10:00:06 +01003533 dev_err(&bp->pdev->dev,
3534 "GEM doesn't support hardware ptp.\n");
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01003535 else {
Rafal Ozieblo7b429612017-06-29 07:12:51 +01003536 bp->hw_dma_cap |= HW_DMA_CAP_PTP;
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01003537 bp->ptp_info = &gem_ptp_info;
3538 }
Rafal Ozieblo7b429612017-06-29 07:12:51 +01003539 }
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01003540#endif
Nicolas Ferree1755872014-07-24 13:50:58 +02003541 }
3542
Andy Shevchenkoa35919e2015-07-24 21:24:01 +03003543 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
Nicolas Ferree1755872014-07-24 13:50:58 +02003544}
3545
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003546static void macb_probe_queues(void __iomem *mem,
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03003547 bool native_io,
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003548 unsigned int *queue_mask,
3549 unsigned int *num_queues)
3550{
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003551 *queue_mask = 0x1;
3552 *num_queues = 1;
3553
Nicolas Ferreda120112015-03-31 15:02:00 +02003554	/* Is it macb or gem?
3555 *
3556 * We need to read directly from the hardware here because
3557 * we are early in the probe process and don't have the
3558	 * MACB_CAPS_MACB_IS_GEM flag set yet
3559 */
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03003560 if (!hw_is_gem(mem, native_io))
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003561 return;
3562
3563 /* bit 0 is never set but queue 0 always exists */
Claudiu Bezneafec371f2020-07-02 12:05:58 +03003564 *queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xff;
Claudiu Bezneab7ab39b2020-07-02 12:05:59 +03003565 *num_queues = hweight32(*queue_mask);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003566}
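/* Worked example (DCFG6 value assumed): a GEM synthesized with four queues
 * would typically report 0x0e in the low byte of DCFG6 (bit 0 reads as
 * zero); OR-ing in the always-present queue 0 yields *queue_mask = 0x0f and
 * hweight32() gives *num_queues = 4.
 */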
3567
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003568static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303569 struct clk **hclk, struct clk **tx_clk,
Harini Katakamf5473d12019-03-01 16:20:33 +05303570 struct clk **rx_clk, struct clk **tsu_clk)
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003571{
Bartosz Folta83a77e92016-12-14 06:39:15 +00003572 struct macb_platform_data *pdata;
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003573 int err;
3574
Bartosz Folta83a77e92016-12-14 06:39:15 +00003575 pdata = dev_get_platdata(&pdev->dev);
3576 if (pdata) {
3577 *pclk = pdata->pclk;
3578 *hclk = pdata->hclk;
3579 } else {
3580 *pclk = devm_clk_get(&pdev->dev, "pclk");
3581 *hclk = devm_clk_get(&pdev->dev, "hclk");
3582 }
3583
Harini Katakamcd5afa92019-03-20 19:12:22 +05303584 if (IS_ERR_OR_NULL(*pclk)) {
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003585 err = PTR_ERR(*pclk);
Harini Katakamcd5afa92019-03-20 19:12:22 +05303586 if (!err)
3587 err = -ENODEV;
3588
Luca Ceresolif413cbb2019-05-14 15:23:07 +02003589 dev_err(&pdev->dev, "failed to get macb_clk (%d)\n", err);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003590 return err;
3591 }
3592
Harini Katakamcd5afa92019-03-20 19:12:22 +05303593 if (IS_ERR_OR_NULL(*hclk)) {
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003594 err = PTR_ERR(*hclk);
Harini Katakamcd5afa92019-03-20 19:12:22 +05303595 if (!err)
3596 err = -ENODEV;
3597
Luca Ceresolif413cbb2019-05-14 15:23:07 +02003598 dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003599 return err;
3600 }
3601
Michael Tretterbd310aca2019-10-18 16:11:43 +02003602 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003603 if (IS_ERR(*tx_clk))
Michael Tretterbd310aca2019-10-18 16:11:43 +02003604 return PTR_ERR(*tx_clk);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003605
Michael Tretterbd310aca2019-10-18 16:11:43 +02003606 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303607 if (IS_ERR(*rx_clk))
Michael Tretterbd310aca2019-10-18 16:11:43 +02003608 return PTR_ERR(*rx_clk);
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303609
Michael Tretterbd310aca2019-10-18 16:11:43 +02003610 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
Harini Katakamf5473d12019-03-01 16:20:33 +05303611 if (IS_ERR(*tsu_clk))
Michael Tretterbd310aca2019-10-18 16:11:43 +02003612 return PTR_ERR(*tsu_clk);
Harini Katakamf5473d12019-03-01 16:20:33 +05303613
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003614 err = clk_prepare_enable(*pclk);
3615 if (err) {
Luca Ceresolif413cbb2019-05-14 15:23:07 +02003616 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003617 return err;
3618 }
3619
3620 err = clk_prepare_enable(*hclk);
3621 if (err) {
Luca Ceresolif413cbb2019-05-14 15:23:07 +02003622 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003623 goto err_disable_pclk;
3624 }
3625
3626 err = clk_prepare_enable(*tx_clk);
3627 if (err) {
Luca Ceresolif413cbb2019-05-14 15:23:07 +02003628 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003629 goto err_disable_hclk;
3630 }
3631
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303632 err = clk_prepare_enable(*rx_clk);
3633 if (err) {
Luca Ceresolif413cbb2019-05-14 15:23:07 +02003634 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303635 goto err_disable_txclk;
3636 }
3637
Harini Katakamf5473d12019-03-01 16:20:33 +05303638 err = clk_prepare_enable(*tsu_clk);
3639 if (err) {
Luca Ceresolif413cbb2019-05-14 15:23:07 +02003640 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
Harini Katakamf5473d12019-03-01 16:20:33 +05303641 goto err_disable_rxclk;
3642 }
3643
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003644 return 0;
3645
Harini Katakamf5473d12019-03-01 16:20:33 +05303646err_disable_rxclk:
3647 clk_disable_unprepare(*rx_clk);
3648
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05303649err_disable_txclk:
3650 clk_disable_unprepare(*tx_clk);
3651
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003652err_disable_hclk:
3653 clk_disable_unprepare(*hclk);
3654
3655err_disable_pclk:
3656 clk_disable_unprepare(*pclk);
3657
3658 return err;
3659}
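/* Sketch of the matching device-tree clock wiring (clock names taken from
 * the lookups above; the phandles are placeholders):
 *
 *	clocks = <&pclk>, <&hclk>, <&tx_clk>, <&rx_clk>, <&tsu_clk>;
 *	clock-names = "pclk", "hclk", "tx_clk", "rx_clk", "tsu_clk";
 *
 * Only "pclk" and "hclk" are mandatory here; the other three are looked up
 * with devm_clk_get_optional() and may be omitted.
 */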
3660
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003661static int macb_init(struct platform_device *pdev)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003662{
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003663 struct net_device *dev = platform_get_drvdata(pdev);
Nicolas Ferrebfa09142015-03-31 15:01:59 +02003664 unsigned int hw_q, q;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003665 struct macb *bp = netdev_priv(dev);
3666 struct macb_queue *queue;
3667 int err;
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003668 u32 val, reg;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003669
Zach Brownb410d132016-10-19 09:56:57 -05003670 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
3671 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
3672
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003673	/* Set the queue register mapping once and for all: queue0 has a special
 3674	 * register mapping, and we don't want to test the queue index and then
 3675	 * compute the corresponding register offset at run time.
3676 */
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003677 for (hw_q = 0, q = 0; hw_q < MACB_MAX_QUEUES; ++hw_q) {
Nicolas Ferrebfa09142015-03-31 15:01:59 +02003678 if (!(bp->queue_mask & (1 << hw_q)))
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003679 continue;
Jamie Iles461845d2011-03-08 20:19:23 +00003680
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003681 queue = &bp->queues[q];
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003682 queue->bp = bp;
Antoine Tenart760a3c12019-06-21 17:28:55 +02003683 netif_napi_add(dev, &queue->napi, macb_poll, NAPI_POLL_WEIGHT);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003684 if (hw_q) {
3685 queue->ISR = GEM_ISR(hw_q - 1);
3686 queue->IER = GEM_IER(hw_q - 1);
3687 queue->IDR = GEM_IDR(hw_q - 1);
3688 queue->IMR = GEM_IMR(hw_q - 1);
3689 queue->TBQP = GEM_TBQP(hw_q - 1);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003690 queue->RBQP = GEM_RBQP(hw_q - 1);
3691 queue->RBQS = GEM_RBQS(hw_q - 1);
Harini Katakamfff80192016-08-09 13:15:53 +05303692#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003693 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003694 queue->TBQPH = GEM_TBQPH(hw_q - 1);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003695 queue->RBQPH = GEM_RBQPH(hw_q - 1);
3696 }
Harini Katakamfff80192016-08-09 13:15:53 +05303697#endif
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003698 } else {
3699 /* queue0 uses legacy registers */
3700 queue->ISR = MACB_ISR;
3701 queue->IER = MACB_IER;
3702 queue->IDR = MACB_IDR;
3703 queue->IMR = MACB_IMR;
3704 queue->TBQP = MACB_TBQP;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003705 queue->RBQP = MACB_RBQP;
Harini Katakamfff80192016-08-09 13:15:53 +05303706#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003707 if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003708 queue->TBQPH = MACB_TBQPH;
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003709 queue->RBQPH = MACB_RBQPH;
3710 }
Harini Katakamfff80192016-08-09 13:15:53 +05303711#endif
Soren Brinkmanne1824df2013-12-10 16:07:23 -08003712 }
Soren Brinkmanne1824df2013-12-10 16:07:23 -08003713
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003714	/* Get the IRQ: here we use the Linux queue index, not the hardware
 3715	 * queue index. The queue IRQ definitions in the device tree
3716 * must remove the optional gaps that could exist in the
3717 * hardware queue mask.
3718 */
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003719 queue->irq = platform_get_irq(pdev, q);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003720 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
Punnaiah Choudary Kalluri20488232015-03-06 18:29:12 +01003721 IRQF_SHARED, dev->name, queue);
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003722 if (err) {
3723 dev_err(&pdev->dev,
3724 "Unable to request IRQ %d (error %d)\n",
3725 queue->irq, err);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02003726 return err;
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003727 }
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003728
Cyrille Pitchen02c958d2014-12-12 13:26:44 +01003729 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
Cyrille Pitchencf250de2014-12-15 15:13:32 +01003730 q++;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003731 }
3732
Alexander Beregalov5f1fa992009-04-11 07:42:26 +00003733 dev->netdev_ops = &macb_netdev_ops;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003734
Nicolas Ferre4df95132013-06-04 21:57:12 +00003735	/* set up the appropriate routines according to the adapter type */
3736 if (macb_is_gem(bp)) {
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003737 bp->max_tx_length = GEM_MAX_TX_LEN;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003738 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
3739 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
3740 bp->macbgem_ops.mog_init_rings = gem_init_rings;
3741 bp->macbgem_ops.mog_rx = gem_rx;
Xander Huff8cd5a562015-01-15 15:55:20 -06003742 dev->ethtool_ops = &gem_ethtool_ops;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003743 } else {
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003744 bp->max_tx_length = MACB_MAX_TX_LEN;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003745 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
3746 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
3747 bp->macbgem_ops.mog_init_rings = macb_init_rings;
3748 bp->macbgem_ops.mog_rx = macb_rx;
Xander Huff8cd5a562015-01-15 15:55:20 -06003749 dev->ethtool_ops = &macb_ethtool_ops;
Nicolas Ferre4df95132013-06-04 21:57:12 +00003750 }
3751
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003752 /* Set features */
3753 dev->hw_features = NETIF_F_SG;
Rafal Ozieblo1629dd42016-11-16 10:02:34 +00003754
3755 /* Check LSO capability */
3756 if (GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
3757 dev->hw_features |= MACB_NETIF_LSO;
3758
Cyrille Pitchen85ff3d82014-07-24 13:51:00 +02003759 /* Checksum offload is only available on gem with packet buffer */
3760 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
Cyrille Pitchen924ec532014-07-24 13:51:01 +02003761 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
Cyrille Pitchena4c35ed32014-07-24 13:50:59 +02003762 if (bp->caps & MACB_CAPS_SG_DISABLED)
3763 dev->hw_features &= ~NETIF_F_SG;
3764 dev->features = dev->hw_features;
3765
Rafal Oziebloae8223de2017-11-30 18:20:44 +00003766 /* Check RX Flow Filters support.
3767	 * Max RX flows are set by the availability of screener & compare regs:
 3768	 * each 4-tuple definition requires 1 T2 screener reg + 3 compare regs
3769 */
3770 reg = gem_readl(bp, DCFG8);
3771 bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
3772 GEM_BFEXT(T2SCR, reg));
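	/* Worked example with assumed DCFG8 values: 32 type-2 compare
	 * regs (SCR2CMP) and 16 T2 screeners (T2SCR) give
	 * max_tuples = min(32 / 3, 16) = min(10, 16) = 10 4-tuple filters.
	 */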
3773 if (bp->max_tuples > 0) {
3774		/* we also need one ethertype match to check for IPv4 */
3775 if (GEM_BFEXT(SCR2ETH, reg) > 0) {
3776			/* program the ethertype compare register now */
3777 reg = 0;
3778 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
3779 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
3780			/* Filtering is supported in hardware but is not enabled in the kernel yet */
3781 dev->hw_features |= NETIF_F_NTUPLE;
3782 /* init Rx flow definitions */
3783 INIT_LIST_HEAD(&bp->rx_fs_list.list);
3784 bp->rx_fs_list.count = 0;
3785 spin_lock_init(&bp->rx_fs_lock);
3786 } else
3787 bp->max_tuples = 0;
3788 }
3789
Neil Armstrongce721a72016-01-05 14:39:16 +01003790 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
3791 val = 0;
Alexandre Belloni2ccb0162020-07-18 01:32:21 +02003792 if (phy_interface_mode_is_rgmii(bp->phy_interface))
Neil Armstrongce721a72016-01-05 14:39:16 +01003793 val = GEM_BIT(RGMII);
3794 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01003795 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
Neil Armstrongce721a72016-01-05 14:39:16 +01003796 val = MACB_BIT(RMII);
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01003797 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
Neil Armstrongce721a72016-01-05 14:39:16 +01003798 val = MACB_BIT(MII);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01003799
Neil Armstrongce721a72016-01-05 14:39:16 +01003800 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
3801 val |= MACB_BIT(CLKEN);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003802
Neil Armstrongce721a72016-01-05 14:39:16 +01003803 macb_or_gem_writel(bp, USRIO, val);
3804 }
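	/* Example with assumed caps: an RGMII PHY on a core that has
	 * MACB_CAPS_USRIO_HAS_CLKEN yields
	 * USRIO = GEM_BIT(RGMII) | MACB_BIT(CLKEN) in the write above.
	 */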
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003805
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003806 /* Set MII management clock divider */
3807 val = macb_mdc_clk_div(bp);
3808 val |= macb_dbw(bp);
Punnaiah Choudary Kalluri022be252015-11-18 09:03:50 +05303809 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
3810 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003811 macb_writel(bp, NCFGR, val);
3812
3813 return 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003814}
3815
3816#if defined(CONFIG_OF)
3817/* max frame size (1518) rounded up to 0x600 (1536) */
3818#define AT91ETHER_MAX_RBUFF_SZ 0x600
3819/* max number of receive buffers */
3820#define AT91ETHER_MAX_RX_DESCR 9
3821
Arnd Bergmann49db9222019-07-08 14:48:23 +02003822static struct sifive_fu540_macb_mgmt *mgmt;
3823
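/* A note on sizing, derived from the constants above:
 * at91ether_alloc_coherent() below reserves 9 descriptors plus
 * 9 * 0x600 = 13824 bytes of coherent buffer memory, one fixed-size
 * receive buffer per descriptor.
 */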
Claudiu Beznea33fdef22020-06-24 13:08:18 +03003824static int at91ether_alloc_coherent(struct macb *lp)
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003825{
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003826 struct macb_queue *q = &lp->queues[0];
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003827
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003828 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003829 (AT91ETHER_MAX_RX_DESCR *
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003830 macb_dma_desc_get_size(lp)),
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003831 &q->rx_ring_dma, GFP_KERNEL);
3832 if (!q->rx_ring)
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003833 return -ENOMEM;
3834
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003835 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003836 AT91ETHER_MAX_RX_DESCR *
3837 AT91ETHER_MAX_RBUFF_SZ,
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003838 &q->rx_buffers_dma, GFP_KERNEL);
3839 if (!q->rx_buffers) {
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003840 dma_free_coherent(&lp->pdev->dev,
3841 AT91ETHER_MAX_RX_DESCR *
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003842 macb_dma_desc_get_size(lp),
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003843 q->rx_ring, q->rx_ring_dma);
3844 q->rx_ring = NULL;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003845 return -ENOMEM;
3846 }
3847
Claudiu Beznea33fdef22020-06-24 13:08:18 +03003848 return 0;
3849}
3850
3851static void at91ether_free_coherent(struct macb *lp)
3852{
3853 struct macb_queue *q = &lp->queues[0];
3854
3855 if (q->rx_ring) {
3856 dma_free_coherent(&lp->pdev->dev,
3857 AT91ETHER_MAX_RX_DESCR *
3858 macb_dma_desc_get_size(lp),
3859 q->rx_ring, q->rx_ring_dma);
3860 q->rx_ring = NULL;
3861 }
3862
3863 if (q->rx_buffers) {
3864 dma_free_coherent(&lp->pdev->dev,
3865 AT91ETHER_MAX_RX_DESCR *
3866 AT91ETHER_MAX_RBUFF_SZ,
3867 q->rx_buffers, q->rx_buffers_dma);
3868 q->rx_buffers = NULL;
3869 }
3870}
3871
3872/* Initialize and start the Receive and Transmit subsystems */
3873static int at91ether_start(struct macb *lp)
3874{
3875 struct macb_queue *q = &lp->queues[0];
3876 struct macb_dma_desc *desc;
3877 dma_addr_t addr;
3878 u32 ctl;
3879 int i, ret;
3880
3881 ret = at91ether_alloc_coherent(lp);
3882 if (ret)
3883 return ret;
3884
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003885 addr = q->rx_buffers_dma;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003886 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003887 desc = macb_rx_desc(q, i);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003888 macb_set_addr(lp, desc, addr);
3889 desc->ctrl = 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003890 addr += AT91ETHER_MAX_RBUFF_SZ;
3891 }
3892
3893 /* Set the Wrap bit on the last descriptor */
Rafal Ozieblodc97a892017-01-27 15:08:20 +00003894 desc->addr |= MACB_BIT(RX_WRAP);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003895
3896 /* Reset buffer index */
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003897 q->rx_tail = 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003898
3899 /* Program address of descriptor list in Rx Buffer Queue register */
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00003900 macb_writel(lp, RBQP, q->rx_ring_dma);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003901
3902 /* Enable Receive and Transmit */
3903 ctl = macb_readl(lp, NCR);
3904 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
3905
Claudiu Beznea33fdef22020-06-24 13:08:18 +03003906 /* Enable MAC interrupts */
3907 macb_writel(lp, IER, MACB_BIT(RCOMP) |
3908 MACB_BIT(RXUBR) |
3909 MACB_BIT(ISR_TUND) |
3910 MACB_BIT(ISR_RLE) |
3911 MACB_BIT(TCOMP) |
3912 MACB_BIT(ISR_ROVR) |
3913 MACB_BIT(HRESP));
3914
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003915 return 0;
3916}
3917
Claudiu Beznea33fdef22020-06-24 13:08:18 +03003918static void at91ether_stop(struct macb *lp)
3919{
3920 u32 ctl;
3921
3922 /* Disable MAC interrupts */
3923 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
3924 MACB_BIT(RXUBR) |
3925 MACB_BIT(ISR_TUND) |
3926 MACB_BIT(ISR_RLE) |
3927 MACB_BIT(TCOMP) |
3928 MACB_BIT(ISR_ROVR) |
3929 MACB_BIT(HRESP));
3930
3931 /* Disable Receiver and Transmitter */
3932 ctl = macb_readl(lp, NCR);
3933 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
3934
3935 /* Free resources. */
3936 at91ether_free_coherent(lp);
3937}
3938
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003939/* Open the ethernet interface */
3940static int at91ether_open(struct net_device *dev)
3941{
3942 struct macb *lp = netdev_priv(dev);
3943 u32 ctl;
3944 int ret;
3945
Alexandre Bellonie6a41c22020-02-12 17:45:38 +01003946 ret = pm_runtime_get_sync(&lp->pdev->dev);
Andy Shevchenko0ce205d2020-04-27 13:51:20 +03003947 if (ret < 0) {
3948 pm_runtime_put_noidle(&lp->pdev->dev);
Alexandre Bellonie6a41c22020-02-12 17:45:38 +01003949 return ret;
Andy Shevchenko0ce205d2020-04-27 13:51:20 +03003950 }
Alexandre Bellonie6a41c22020-02-12 17:45:38 +01003951
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003952 /* Clear internal statistics */
3953 ctl = macb_readl(lp, NCR);
3954 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
3955
3956 macb_set_hwaddr(lp);
3957
Claudiu Beznea33fdef22020-06-24 13:08:18 +03003958 ret = at91ether_start(lp);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003959 if (ret)
Claudiu Beznea0eaf2282020-06-24 13:08:17 +03003960 goto pm_exit;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003961
Antoine Tenart7897b072019-11-13 10:00:06 +01003962 ret = macb_phylink_connect(lp);
3963 if (ret)
Claudiu Beznea33fdef22020-06-24 13:08:18 +03003964 goto stop;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003965
3966 netif_start_queue(dev);
3967
3968 return 0;
Claudiu Beznea0eaf2282020-06-24 13:08:17 +03003969
Claudiu Beznea33fdef22020-06-24 13:08:18 +03003970stop:
3971 at91ether_stop(lp);
Claudiu Beznea0eaf2282020-06-24 13:08:17 +03003972pm_exit:
3973 pm_runtime_put_sync(&lp->pdev->dev);
3974 return ret;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003975}
3976
3977/* Close the interface */
3978static int at91ether_close(struct net_device *dev)
3979{
3980 struct macb *lp = netdev_priv(dev);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003981
3982 netif_stop_queue(dev);
3983
Antoine Tenart7897b072019-11-13 10:00:06 +01003984 phylink_stop(lp->phylink);
3985 phylink_disconnect_phy(lp->phylink);
3986
Claudiu Beznea33fdef22020-06-24 13:08:18 +03003987 at91ether_stop(lp);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003988
Alexandre Bellonie6a41c22020-02-12 17:45:38 +01003989 return pm_runtime_put(&lp->pdev->dev);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003990}
3991
3992/* Transmit a packet. The EMAC sends one frame at a time: the buffer
 * address and length are written to TAR/TCR and the queue is kept
 * stopped until the TCOMP interrupt reports completion.
 */
Claudiu Beznead1c38952018-08-07 12:25:12 +03003993static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
3994 struct net_device *dev)
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01003995{
3996 struct macb *lp = netdev_priv(dev);
3997
3998 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
3999 netif_stop_queue(dev);
4000
4001 /* Store packet information (to free when Tx completed) */
4002 lp->skb = skb;
4003 lp->skb_length = skb->len;
Christoph Hellwig564923e2019-02-11 14:19:59 +01004004 lp->skb_physaddr = dma_map_single(&lp->pdev->dev, skb->data,
4005 skb->len, DMA_TO_DEVICE);
4006 if (dma_mapping_error(&lp->pdev->dev, lp->skb_physaddr)) {
Alexey Khoroshilov178c7ae2016-11-19 01:40:10 +03004007 dev_kfree_skb_any(skb);
4008 dev->stats.tx_dropped++;
4009 netdev_err(dev, "%s: DMA mapping error\n", __func__);
4010 return NETDEV_TX_OK;
4011 }
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004012
4013 /* Set address of the data in the Transmit Address register */
4014 macb_writel(lp, TAR, lp->skb_physaddr);
4015 /* Set length of the packet in the Transmit Control register */
4016 macb_writel(lp, TCR, skb->len);
4017
4018 } else {
4019 netdev_err(dev, "%s called, but device is busy!\n", __func__);
4020 return NETDEV_TX_BUSY;
4021 }
4022
4023 return NETDEV_TX_OK;
4024}
4025
4026/* Extract received frames from the buffer descriptors and send them to the upper layers.
4027 * (Called from interrupt context)
4028 */
4029static void at91ether_rx(struct net_device *dev)
4030{
4031 struct macb *lp = netdev_priv(dev);
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00004032 struct macb_queue *q = &lp->queues[0];
Rafal Ozieblodc97a892017-01-27 15:08:20 +00004033 struct macb_dma_desc *desc;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004034 unsigned char *p_recv;
4035 struct sk_buff *skb;
4036 unsigned int pktlen;
4037
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00004038 desc = macb_rx_desc(q, q->rx_tail);
Rafal Ozieblodc97a892017-01-27 15:08:20 +00004039 while (desc->addr & MACB_BIT(RX_USED)) {
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00004040 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00004041 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004042 skb = netdev_alloc_skb(dev, pktlen + 2);
4043 if (skb) {
4044 skb_reserve(skb, 2);
Johannes Berg59ae1d12017-06-16 14:29:20 +02004045 skb_put_data(skb, p_recv, pktlen);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004046
4047 skb->protocol = eth_type_trans(skb, dev);
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02004048 dev->stats.rx_packets++;
4049 dev->stats.rx_bytes += pktlen;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004050 netif_rx(skb);
4051 } else {
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02004052 dev->stats.rx_dropped++;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004053 }
4054
Rafal Ozieblodc97a892017-01-27 15:08:20 +00004055 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02004056 dev->stats.multicast++;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004057
4058 /* reset ownership bit */
Rafal Ozieblodc97a892017-01-27 15:08:20 +00004059 desc->addr &= ~MACB_BIT(RX_USED);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004060
4061 /* wrap after last buffer */
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00004062 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
4063 q->rx_tail = 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004064 else
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00004065 q->rx_tail++;
Rafal Ozieblodc97a892017-01-27 15:08:20 +00004066
Rafal Oziebloae1f2a52017-11-30 18:19:15 +00004067 desc = macb_rx_desc(q, q->rx_tail);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004068 }
4069}
4070
4071/* MAC interrupt handler */
4072static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
4073{
4074 struct net_device *dev = dev_id;
4075 struct macb *lp = netdev_priv(dev);
4076 u32 intstatus, ctl;
4077
4078 /* MAC Interrupt Status register indicates what interrupts are pending.
4079 * It is automatically cleared once read.
4080 */
4081 intstatus = macb_readl(lp, ISR);
4082
4083 /* Receive complete */
4084 if (intstatus & MACB_BIT(RCOMP))
4085 at91ether_rx(dev);
4086
4087 /* Transmit complete */
4088 if (intstatus & MACB_BIT(TCOMP)) {
4089		/* The TCOMP bit is set even if the transmission failed */
4090 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02004091 dev->stats.tx_errors++;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004092
4093 if (lp->skb) {
Yang Weib9560a22019-02-13 00:00:02 +08004094 dev_consume_skb_irq(lp->skb);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004095 lp->skb = NULL;
Christoph Hellwig564923e2019-02-11 14:19:59 +01004096 dma_unmap_single(&lp->pdev->dev, lp->skb_physaddr,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004097 lp->skb_length, DMA_TO_DEVICE);
Tobias Klauser5f1d3a52017-04-07 10:17:30 +02004098 dev->stats.tx_packets++;
4099 dev->stats.tx_bytes += lp->skb_length;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004100 }
4101 netif_wake_queue(dev);
4102 }
4103
4104	/* Work-around for EMAC Errata section 41.3.1: recover from a
	 * used-bit read (RXUBR) by disabling and re-enabling the receiver
	 */
4105 if (intstatus & MACB_BIT(RXUBR)) {
4106 ctl = macb_readl(lp, NCR);
4107 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
Zumeng Chenffac0e92016-11-28 21:55:00 +08004108 wmb();
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004109 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
4110 }
4111
4112 if (intstatus & MACB_BIT(ISR_ROVR))
4113 netdev_err(dev, "ROVR error\n");
4114
4115 return IRQ_HANDLED;
4116}
4117
4118#ifdef CONFIG_NET_POLL_CONTROLLER
4119static void at91ether_poll_controller(struct net_device *dev)
4120{
4121 unsigned long flags;
4122
4123 local_irq_save(flags);
4124 at91ether_interrupt(dev->irq, dev);
4125 local_irq_restore(flags);
4126}
4127#endif
4128
4129static const struct net_device_ops at91ether_netdev_ops = {
4130 .ndo_open = at91ether_open,
4131 .ndo_stop = at91ether_close,
4132 .ndo_start_xmit = at91ether_start_xmit,
4133 .ndo_get_stats = macb_get_stats,
4134 .ndo_set_rx_mode = macb_set_rx_mode,
4135 .ndo_set_mac_address = eth_mac_addr,
4136 .ndo_do_ioctl = macb_ioctl,
4137 .ndo_validate_addr = eth_validate_addr,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004138#ifdef CONFIG_NET_POLL_CONTROLLER
4139 .ndo_poll_controller = at91ether_poll_controller,
4140#endif
4141};
4142
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004143static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05304144 struct clk **hclk, struct clk **tx_clk,
Harini Katakamf5473d12019-03-01 16:20:33 +05304145 struct clk **rx_clk, struct clk **tsu_clk)
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004146{
4147 int err;
4148
4149 *hclk = NULL;
4150 *tx_clk = NULL;
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05304151 *rx_clk = NULL;
Harini Katakamf5473d12019-03-01 16:20:33 +05304152 *tsu_clk = NULL;
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004153
4154 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
4155 if (IS_ERR(*pclk))
4156 return PTR_ERR(*pclk);
4157
4158 err = clk_prepare_enable(*pclk);
4159 if (err) {
Luca Ceresolif413cbb2019-05-14 15:23:07 +02004160 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004161 return err;
4162 }
4163
4164 return 0;
4165}
4166
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004167static int at91ether_init(struct platform_device *pdev)
4168{
4169 struct net_device *dev = platform_get_drvdata(pdev);
4170 struct macb *bp = netdev_priv(dev);
4171 int err;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004172
Alexandre Bellonifec9d3b2018-06-26 10:44:01 +02004173 bp->queues[0].bp = bp;
4174
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004175 dev->netdev_ops = &at91ether_netdev_ops;
4176 dev->ethtool_ops = &macb_ethtool_ops;
4177
4178 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
4179 0, dev->name, dev);
4180 if (err)
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004181 return err;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004182
4183 macb_writel(bp, NCR, 0);
4184
Alexandre Belloniac2fcfa2020-02-19 15:15:51 +01004185 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004186
4187 return 0;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004188}
4189
Yash Shahc218ad52019-06-18 13:26:08 +05304190static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
4191 unsigned long parent_rate)
4192{
4193 return mgmt->rate;
4194}
4195
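/* Round a requested TX clock rate to one of the three rates the GEMGXL
 * management block supports: 2.5, 25 or 125 MHz (i.e. 10/100/1000 Mbps).
 * Requests round to the nearest supported rate, with cut-over points at
 * the midpoints (13.75 MHz and 75 MHz); anything other than an exact
 * supported rate trips a WARN_ON().
 */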
4196static long fu540_macb_tx_round_rate(struct clk_hw *hw, unsigned long rate,
4197 unsigned long *parent_rate)
4198{
4199 if (WARN_ON(rate < 2500000))
4200 return 2500000;
4201 else if (rate == 2500000)
4202 return 2500000;
4203 else if (WARN_ON(rate < 13750000))
4204 return 2500000;
4205 else if (WARN_ON(rate < 25000000))
4206 return 25000000;
4207 else if (rate == 25000000)
4208 return 25000000;
4209 else if (WARN_ON(rate < 75000000))
4210 return 25000000;
4211 else if (WARN_ON(rate < 125000000))
4212 return 125000000;
4213 else if (rate == 125000000)
4214 return 125000000;
4215
4216 WARN_ON(rate > 125000000);
4217
4218 return 125000000;
4219}
4220
4221static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
4222 unsigned long parent_rate)
4223{
4224 rate = fu540_macb_tx_round_rate(hw, rate, &parent_rate);
4225 if (rate != 125000000)
4226 iowrite32(1, mgmt->reg);
4227 else
4228 iowrite32(0, mgmt->reg);
4229 mgmt->rate = rate;
4230
4231 return 0;
4232}
4233
4234static const struct clk_ops fu540_c000_ops = {
4235 .recalc_rate = fu540_macb_tx_recalc_rate,
4236 .round_rate = fu540_macb_tx_round_rate,
4237 .set_rate = fu540_macb_tx_set_rate,
4238};
4239
4240static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
4241 struct clk **hclk, struct clk **tx_clk,
4242 struct clk **rx_clk, struct clk **tsu_clk)
4243{
4244 struct clk_init_data init;
4245 int err = 0;
4246
4247 err = macb_clk_init(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
4248 if (err)
4249 return err;
4250
4251 mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
4252 if (!mgmt)
4253 return -ENOMEM;
4254
4255 init.name = "sifive-gemgxl-mgmt";
4256 init.ops = &fu540_c000_ops;
4257 init.flags = 0;
4258 init.num_parents = 0;
4259
4260 mgmt->rate = 0;
4261 mgmt->hw.init = &init;
4262
Stephen Boydd89091a2020-01-03 16:19:21 -08004263 *tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
Yash Shahc218ad52019-06-18 13:26:08 +05304264 if (IS_ERR(*tx_clk))
4265 return PTR_ERR(*tx_clk);
4266
4267 err = clk_prepare_enable(*tx_clk);
4268 if (err)
4269		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
4270 else
4271 dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
4272
4273 return 0;
4274}
4275
4276static int fu540_c000_init(struct platform_device *pdev)
4277{
Dejin Zhengb959c772020-05-03 20:32:26 +08004278 mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
4279 if (IS_ERR(mgmt->reg))
4280 return PTR_ERR(mgmt->reg);
Yash Shahc218ad52019-06-18 13:26:08 +05304281
4282 return macb_init(pdev);
4283}
4284
4285static const struct macb_config fu540_c000_config = {
4286 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
4287 MACB_CAPS_GEM_HAS_PTP,
4288 .dma_burst_length = 16,
4289 .clk_init = fu540_c000_clk_init,
4290 .init = fu540_c000_init,
4291 .jumbo_max_len = 10240,
4292};
4293
David S. Miller3cef5c52015-03-09 23:38:02 -04004294static const struct macb_config at91sam9260_config = {
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01004295 .caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004296 .clk_init = macb_clk_init,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004297 .init = macb_init,
4298};
4299
Nicolas Ferreeb4ed8e2018-09-14 17:48:10 +02004300static const struct macb_config sama5d3macb_config = {
4301 .caps = MACB_CAPS_SG_DISABLED
4302 | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
4303 .clk_init = macb_clk_init,
4304 .init = macb_init,
4305};
4306
David S. Miller3cef5c52015-03-09 23:38:02 -04004307static const struct macb_config pc302gem_config = {
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004308 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
4309 .dma_burst_length = 16,
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004310 .clk_init = macb_clk_init,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004311 .init = macb_init,
4312};
4313
Cyrille Pitchen5c8fe712015-06-18 16:27:23 +02004314static const struct macb_config sama5d2_config = {
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01004315 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
Cyrille Pitchen5c8fe712015-06-18 16:27:23 +02004316 .dma_burst_length = 16,
4317 .clk_init = macb_clk_init,
4318 .init = macb_init,
4319};
4320
David S. Miller3cef5c52015-03-09 23:38:02 -04004321static const struct macb_config sama5d3_config = {
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01004322 .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE
vishnuvardhan233a1582017-07-05 17:36:16 +02004323 | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004324 .dma_burst_length = 16,
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004325 .clk_init = macb_clk_init,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004326 .init = macb_init,
vishnuvardhan233a1582017-07-05 17:36:16 +02004327 .jumbo_max_len = 10240,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004328};
4329
David S. Miller3cef5c52015-03-09 23:38:02 -04004330static const struct macb_config sama5d4_config = {
Nicolas Ferre6bdaa5e2016-03-10 16:44:32 +01004331 .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004332 .dma_burst_length = 4,
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004333 .clk_init = macb_clk_init,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004334 .init = macb_init,
4335};
4336
David S. Miller3cef5c52015-03-09 23:38:02 -04004337static const struct macb_config emac_config = {
Alexandre Belloniac2fcfa2020-02-19 15:15:51 +01004338 .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004339 .clk_init = at91ether_clk_init,
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004340 .init = at91ether_init,
4341};
4342
Neil Armstronge611b5b2016-01-05 14:39:17 +01004343static const struct macb_config np4_config = {
4344 .caps = MACB_CAPS_USRIO_DISABLED,
4345 .clk_init = macb_clk_init,
4346 .init = macb_init,
4347};
David S. Miller36583eb2015-05-23 01:22:35 -04004348
Harini Katakam7b61f9c2015-05-06 22:27:16 +05304349static const struct macb_config zynqmp_config = {
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01004350 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4351 MACB_CAPS_JUMBO |
Harini Katakam404cd082018-07-06 12:18:58 +05304352 MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
Harini Katakam7b61f9c2015-05-06 22:27:16 +05304353 .dma_burst_length = 16,
4354 .clk_init = macb_clk_init,
4355 .init = macb_init,
Harini Katakam98b5a0f42015-05-06 22:27:17 +05304356 .jumbo_max_len = 10240,
Harini Katakam7b61f9c2015-05-06 22:27:16 +05304357};
4358
Nathan Sullivan222ca8e2015-05-22 09:22:10 -05004359static const struct macb_config zynq_config = {
Harini Katakame5010702019-01-29 15:20:03 +05304360 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
4361 MACB_CAPS_NEEDS_RSTONUBR,
Nathan Sullivan222ca8e2015-05-22 09:22:10 -05004362 .dma_burst_length = 16,
4363 .clk_init = macb_clk_init,
4364 .init = macb_init,
4365};
4366
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004367static const struct of_device_id macb_dt_ids[] = {
4368 { .compatible = "cdns,at32ap7000-macb" },
4369 { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
4370 { .compatible = "cdns,macb" },
Neil Armstronge611b5b2016-01-05 14:39:17 +01004371 { .compatible = "cdns,np4-macb", .data = &np4_config },
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004372 { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
4373 { .compatible = "cdns,gem", .data = &pc302gem_config },
Nicolas Ferre3e3e0cd2019-02-06 18:56:10 +01004374 { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
Cyrille Pitchen5c8fe712015-06-18 16:27:23 +02004375 { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004376 { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
Nicolas Ferreeb4ed8e2018-09-14 17:48:10 +02004377 { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004378 { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
4379 { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
4380 { .compatible = "cdns,emac", .data = &emac_config },
Harini Katakam7b61f9c2015-05-06 22:27:16 +05304381 { .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config},
Nathan Sullivan222ca8e2015-05-22 09:22:10 -05004382 { .compatible = "cdns,zynq-gem", .data = &zynq_config },
Yash Shah6342ea82019-08-27 10:36:04 +05304383 { .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004384 { /* sentinel */ }
4385};
4386MODULE_DEVICE_TABLE(of, macb_dt_ids);
4387#endif /* CONFIG_OF */
4388
Bartosz Folta83a77e92016-12-14 06:39:15 +00004389static const struct macb_config default_gem_config = {
Rafal Oziebloab91f0a2017-06-29 07:14:16 +01004390 .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
4391 MACB_CAPS_JUMBO |
4392 MACB_CAPS_GEM_HAS_PTP,
Bartosz Folta83a77e92016-12-14 06:39:15 +00004393 .dma_burst_length = 16,
4394 .clk_init = macb_clk_init,
4395 .init = macb_init,
4396 .jumbo_max_len = 10240,
4397};
4398
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004399static int macb_probe(struct platform_device *pdev)
4400{
Bartosz Folta83a77e92016-12-14 06:39:15 +00004401 const struct macb_config *macb_config = &default_gem_config;
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004402 int (*clk_init)(struct platform_device *, struct clk **,
Harini Katakamf5473d12019-03-01 16:20:33 +05304403 struct clk **, struct clk **, struct clk **,
4404 struct clk **) = macb_config->clk_init;
Bartosz Folta83a77e92016-12-14 06:39:15 +00004405 int (*init)(struct platform_device *) = macb_config->init;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004406 struct device_node *np = pdev->dev.of_node;
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05304407 struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
Harini Katakamf5473d12019-03-01 16:20:33 +05304408 struct clk *tsu_clk = NULL;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004409 unsigned int queue_mask, num_queues;
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03004410 bool native_io;
Andrew Lunn0c65b2b2019-11-04 02:40:33 +01004411 phy_interface_t interface;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004412 struct net_device *dev;
4413 struct resource *regs;
4414 void __iomem *mem;
4415 const char *mac;
4416 struct macb *bp;
Harini Katakam404cd082018-07-06 12:18:58 +05304417 int err, val;
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004418
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03004419 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
4420 mem = devm_ioremap_resource(&pdev->dev, regs);
4421 if (IS_ERR(mem))
4422 return PTR_ERR(mem);
4423
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004424 if (np) {
4425 const struct of_device_id *match;
4426
4427 match = of_match_node(macb_dt_ids, np);
4428 if (match && match->data) {
4429 macb_config = match->data;
4430 clk_init = macb_config->clk_init;
4431 init = macb_config->init;
4432 }
4433 }
4434
Harini Katakamf5473d12019-03-01 16:20:33 +05304435 err = clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004436 if (err)
4437 return err;
4438
Harini Katakamd54f89a2019-03-01 16:20:34 +05304439 pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
4440 pm_runtime_use_autosuspend(&pdev->dev);
4441 pm_runtime_get_noresume(&pdev->dev);
4442 pm_runtime_set_active(&pdev->dev);
4443 pm_runtime_enable(&pdev->dev);
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03004444 native_io = hw_is_native_io(mem);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004445
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03004446 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004447 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004448 if (!dev) {
4449 err = -ENOMEM;
4450 goto err_disable_clocks;
4451 }
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004452
4453 dev->base_addr = regs->start;
4454
4455 SET_NETDEV_DEV(dev, &pdev->dev);
4456
4457 bp = netdev_priv(dev);
4458 bp->pdev = pdev;
4459 bp->dev = dev;
4460 bp->regs = mem;
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03004461 bp->native_io = native_io;
4462 if (native_io) {
David S. Miller7a6e0702015-07-27 14:24:48 -07004463 bp->macb_reg_readl = hw_readl_native;
4464 bp->macb_reg_writel = hw_writel_native;
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03004465 } else {
David S. Miller7a6e0702015-07-27 14:24:48 -07004466 bp->macb_reg_readl = hw_readl;
4467 bp->macb_reg_writel = hw_writel;
Andy Shevchenkof2ce8a9e2015-07-24 21:23:59 +03004468 }
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004469 bp->num_queues = num_queues;
Nicolas Ferrebfa09142015-03-31 15:01:59 +02004470 bp->queue_mask = queue_mask;
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004471 if (macb_config)
4472 bp->dma_burst_length = macb_config->dma_burst_length;
4473 bp->pclk = pclk;
4474 bp->hclk = hclk;
4475 bp->tx_clk = tx_clk;
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05304476 bp->rx_clk = rx_clk;
Harini Katakamf5473d12019-03-01 16:20:33 +05304477 bp->tsu_clk = tsu_clk;
Andy Shevchenkof36dbe62015-07-24 21:24:00 +03004478 if (macb_config)
Harini Katakam98b5a0f42015-05-06 22:27:17 +05304479 bp->jumbo_max_len = macb_config->jumbo_max_len;
Harini Katakam98b5a0f42015-05-06 22:27:17 +05304480
Sergio Prado3e2a5e12016-02-09 12:07:16 -02004481 bp->wol = 0;
Sergio Prado7c4a1d02016-02-16 21:10:45 -02004482 if (of_get_property(np, "magic-packet", NULL))
Sergio Prado3e2a5e12016-02-09 12:07:16 -02004483 bp->wol |= MACB_WOL_HAS_MAGIC_PACKET;
Nicolas Ferreced47992020-07-10 14:46:42 +02004484 device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET);
Sergio Prado3e2a5e12016-02-09 12:07:16 -02004485
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004486 spin_lock_init(&bp->lock);
4487
Nicolas Ferread783472015-03-31 15:02:02 +02004488 /* setup capabilities */
Nicolas Ferref6970502015-03-31 15:02:01 +02004489 macb_configure_caps(bp, macb_config);
4490
Rafal Ozieblo7b429612017-06-29 07:12:51 +01004491#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
4492 if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
4493 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
4494 bp->hw_dma_cap |= HW_DMA_CAP_64B;
4495 }
4496#endif
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004497 platform_set_drvdata(pdev, dev);
4498
4499 dev->irq = platform_get_irq(pdev, 0);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004500 if (dev->irq < 0) {
4501 err = dev->irq;
Wei Yongjunb22ae0b2016-08-12 15:43:54 +00004502 goto err_out_free_netdev;
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004503 }
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004504
Jarod Wilson44770e12016-10-17 15:54:17 -04004505 /* MTU range: 68 - 1500 or 10240 */
4506 dev->min_mtu = GEM_MTU_MIN_SIZE;
4507 if (bp->caps & MACB_CAPS_JUMBO)
4508 dev->max_mtu = gem_readl(bp, JML) - ETH_HLEN - ETH_FCS_LEN;
4509 else
4510 dev->max_mtu = ETH_DATA_LEN;
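	/* e.g. assuming JML still holds its 10240-byte default, max_mtu
	 * works out to 10240 - ETH_HLEN (14) - ETH_FCS_LEN (4) = 10222.
	 */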
4511
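	/* The DCFG10 RXBD_RDBUFF/TXBD_RDBUFF fields encode a power of two:
	 * a field value v means (2 << (v - 1)) descriptors are prefetched,
	 * e.g. v = 2 reserves 4 descriptors' worth of extra ring space.
	 */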
Harini Katakam404cd082018-07-06 12:18:58 +05304512 if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
4513 val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
4514 if (val)
4515 bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
4516 macb_dma_desc_get_size(bp);
4517
4518 val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
4519 if (val)
4520 bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
4521 macb_dma_desc_get_size(bp);
4522 }
4523
Harini Katakame5010702019-01-29 15:20:03 +05304524 bp->rx_intr_mask = MACB_RX_INT_FLAGS;
4525 if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
4526 bp->rx_intr_mask |= MACB_BIT(RXUBR);
4527
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004528 mac = of_get_mac_address(np);
Petr Å tetiar541ddc62019-05-03 16:27:08 +02004529 if (PTR_ERR(mac) == -EPROBE_DEFER) {
4530 err = -EPROBE_DEFER;
4531 goto err_out_free_netdev;
Antoine Tenart2bf4ecb2019-06-21 17:26:35 +02004532 } else if (!IS_ERR_OR_NULL(mac)) {
Moritz Fischereefb52d2016-03-29 19:11:14 -07004533 ether_addr_copy(bp->dev->dev_addr, mac);
Mike Looijmansaa076e32018-03-29 07:29:49 +02004534 } else {
Petr Å tetiar541ddc62019-05-03 16:27:08 +02004535 macb_get_hwaddr(bp);
Mike Looijmansaa076e32018-03-29 07:29:49 +02004536 }
frederic RODO6c36a702007-07-12 19:07:24 +02004537
Andrew Lunn0c65b2b2019-11-04 02:40:33 +01004538 err = of_get_phy_mode(np, &interface);
4539 if (err)
Nicolas Ferre8b952742019-05-03 12:36:58 +02004540 /* not found in DT, MII by default */
4541 bp->phy_interface = PHY_INTERFACE_MODE_MII;
4542 else
Andrew Lunn0c65b2b2019-11-04 02:40:33 +01004543 bp->phy_interface = interface;
Jean-Christophe PLAGNIOL-VILLARDfb97a842011-11-18 15:29:25 +01004544
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004545 /* IP specific init */
4546 err = init(pdev);
4547 if (err)
4548 goto err_out_free_netdev;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004549
Florian Fainellicf669662016-05-02 18:38:45 -07004550 err = macb_mii_init(bp);
4551 if (err)
4552 goto err_out_free_netdev;
4553
Florian Fainellicf669662016-05-02 18:38:45 -07004554 netif_carrier_off(dev);
4555
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004556 err = register_netdev(dev);
4557 if (err) {
4558 dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
Florian Fainellicf669662016-05-02 18:38:45 -07004559 goto err_out_unregister_mdio;
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004560 }
4561
Allen Paise7412b82020-09-14 12:59:23 +05304562 tasklet_setup(&bp->hresp_err_tasklet, macb_hresp_error_task);
Harini Katakam032dc412018-01-27 12:09:01 +05304563
Bo Shen58798232014-09-13 01:57:49 +02004564 netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
4565 macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
4566 dev->base_addr, dev->irq, dev->dev_addr);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004567
Harini Katakamd54f89a2019-03-01 16:20:34 +05304568 pm_runtime_mark_last_busy(&bp->pdev->dev);
4569 pm_runtime_put_autosuspend(&bp->pdev->dev);
4570
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004571 return 0;
4572
Florian Fainellicf669662016-05-02 18:38:45 -07004573err_out_unregister_mdio:
Florian Fainellicf669662016-05-02 18:38:45 -07004574 mdiobus_unregister(bp->mii_bus);
4575 mdiobus_free(bp->mii_bus);
4576
Cyrille Pitchencf250de2014-12-15 15:13:32 +01004577err_out_free_netdev:
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004578 free_netdev(dev);
Cyrille Pitchen421d9df2015-03-07 07:23:32 +01004579
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004580err_disable_clocks:
4581 clk_disable_unprepare(tx_clk);
4582 clk_disable_unprepare(hclk);
4583 clk_disable_unprepare(pclk);
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05304584 clk_disable_unprepare(rx_clk);
Harini Katakamf5473d12019-03-01 16:20:33 +05304585 clk_disable_unprepare(tsu_clk);
Harini Katakamd54f89a2019-03-01 16:20:34 +05304586 pm_runtime_disable(&pdev->dev);
4587 pm_runtime_set_suspended(&pdev->dev);
4588 pm_runtime_dont_use_autosuspend(&pdev->dev);
Nicolas Ferrec69618b2015-03-31 15:02:03 +02004589
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004590 return err;
4591}
4592
Nicolae Rosia9e86d7662015-01-22 17:31:05 +00004593static int macb_remove(struct platform_device *pdev)
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004594{
4595 struct net_device *dev;
4596 struct macb *bp;
4597
4598 dev = platform_get_drvdata(pdev);
4599
4600 if (dev) {
4601 bp = netdev_priv(dev);
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07004602 mdiobus_unregister(bp->mii_bus);
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07004603 mdiobus_free(bp->mii_bus);
Gregory CLEMENT5833e052015-12-11 11:34:53 +01004604
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004605 unregister_netdev(dev);
Chuhong Yuan61183b02019-11-28 10:00:21 +08004606 tasklet_kill(&bp->hresp_err_tasklet);
Harini Katakamd54f89a2019-03-01 16:20:34 +05304607 pm_runtime_disable(&pdev->dev);
4608 pm_runtime_dont_use_autosuspend(&pdev->dev);
4609 if (!pm_runtime_suspended(&pdev->dev)) {
4610 clk_disable_unprepare(bp->tx_clk);
4611 clk_disable_unprepare(bp->hclk);
4612 clk_disable_unprepare(bp->pclk);
4613 clk_disable_unprepare(bp->rx_clk);
4614 clk_disable_unprepare(bp->tsu_clk);
4615 pm_runtime_set_suspended(&pdev->dev);
4616 }
Antoine Tenart7897b072019-11-13 10:00:06 +01004617 phylink_destroy(bp->phylink);
Cyrille Pitchene965be72014-12-15 15:13:31 +01004618 free_netdev(dev);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004619 }
4620
4621 return 0;
4622}
4623
Michal Simekd23823d2015-01-23 09:36:03 +01004624static int __maybe_unused macb_suspend(struct device *dev)
Haavard Skinnemoenc1f598f2008-03-04 13:39:29 +01004625{
Wolfram Sangce886a42018-10-21 22:00:14 +02004626 struct net_device *netdev = dev_get_drvdata(dev);
Haavard Skinnemoenc1f598f2008-03-04 13:39:29 +01004627 struct macb *bp = netdev_priv(netdev);
Harini Katakamde991c52019-03-01 16:20:35 +05304628 struct macb_queue *queue = bp->queues;
4629 unsigned long flags;
4630 unsigned int q;
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004631 int err;
Haavard Skinnemoenc1f598f2008-03-04 13:39:29 +01004632
Harini Katakamde991c52019-03-01 16:20:35 +05304633 if (!netif_running(netdev))
4634 return 0;
4635
Sergio Prado3e2a5e12016-02-09 12:07:16 -02004636 if (bp->wol & MACB_WOL_ENABLED) {
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004637 spin_lock_irqsave(&bp->lock, flags);
4638 /* Flush all status bits */
4639 macb_writel(bp, TSR, -1);
4640 macb_writel(bp, RSR, -1);
Harini Katakamde991c52019-03-01 16:20:35 +05304641 for (q = 0, queue = bp->queues; q < bp->num_queues;
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004642 ++q, ++queue) {
4643 /* Disable all interrupts */
4644 queue_writel(queue, IDR, -1);
4645 queue_readl(queue, ISR);
4646 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
4647 queue_writel(queue, ISR, -1);
4648 }
4649		/* Change the interrupt handler and
4650		 * enable the WoL IRQ on queue 0
4651 */
Nicolas Ferre9d45c8e2020-07-20 10:56:53 +02004652 devm_free_irq(dev, bp->queues[0].irq, bp->queues);
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004653 if (macb_is_gem(bp)) {
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004654 err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
4655 IRQF_SHARED, netdev->name, bp->queues);
4656 if (err) {
4657 dev_err(dev,
4658 "Unable to request IRQ %d (error %d)\n",
4659 bp->queues[0].irq, err);
4660 spin_unlock_irqrestore(&bp->lock, flags);
4661 return err;
4662 }
4663 queue_writel(bp->queues, IER, GEM_BIT(WOL));
4664 gem_writel(bp, WOL, MACB_BIT(MAG));
4665 } else {
Nicolas Ferre9d45c8e2020-07-20 10:56:53 +02004666 err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
4667 IRQF_SHARED, netdev->name, bp->queues);
4668 if (err) {
4669 dev_err(dev,
4670 "Unable to request IRQ %d (error %d)\n",
4671 bp->queues[0].irq, err);
4672 spin_unlock_irqrestore(&bp->lock, flags);
4673 return err;
4674 }
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004675 queue_writel(bp->queues, IER, MACB_BIT(WOL));
4676 macb_writel(bp, WOL, MACB_BIT(MAG));
4677 }
4678 spin_unlock_irqrestore(&bp->lock, flags);
4679
4680 enable_irq_wake(bp->queues[0].irq);
4681 }
4682
4683 netif_device_detach(netdev);
4684 for (q = 0, queue = bp->queues; q < bp->num_queues;
4685 ++q, ++queue)
4686 napi_disable(&queue->napi);
4687
4688 if (!(bp->wol & MACB_WOL_ENABLED)) {
Antoine Tenart7897b072019-11-13 10:00:06 +01004689 rtnl_lock();
4690 phylink_stop(bp->phylink);
4691 rtnl_unlock();
Harini Katakamde991c52019-03-01 16:20:35 +05304692 spin_lock_irqsave(&bp->lock, flags);
4693 macb_reset_hw(bp);
4694 spin_unlock_irqrestore(&bp->lock, flags);
Harini Katakamd54f89a2019-03-01 16:20:34 +05304695 }
4696
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004697 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
4698 bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
4699
4700 if (netdev->hw_features & NETIF_F_NTUPLE)
4701 bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
4702
Harini Katakamde991c52019-03-01 16:20:35 +05304703 if (bp->ptp_info)
4704 bp->ptp_info->ptp_remove(netdev);
Nicolas Ferre6c8f85c2020-07-10 14:46:45 +02004705 if (!device_may_wakeup(dev))
4706 pm_runtime_force_suspend(dev);
Harini Katakamd54f89a2019-03-01 16:20:34 +05304707
4708 return 0;
4709}
4710
4711static int __maybe_unused macb_resume(struct device *dev)
4712{
4713 struct net_device *netdev = dev_get_drvdata(dev);
4714 struct macb *bp = netdev_priv(netdev);
Harini Katakamde991c52019-03-01 16:20:35 +05304715 struct macb_queue *queue = bp->queues;
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004716 unsigned long flags;
Harini Katakamde991c52019-03-01 16:20:35 +05304717 unsigned int q;
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004718 int err;
Harini Katakamde991c52019-03-01 16:20:35 +05304719
4720 if (!netif_running(netdev))
4721 return 0;
Harini Katakamd54f89a2019-03-01 16:20:34 +05304722
Nicolas Ferre6c8f85c2020-07-10 14:46:45 +02004723 if (!device_may_wakeup(dev))
4724 pm_runtime_force_resume(dev);
Harini Katakamd54f89a2019-03-01 16:20:34 +05304725
4726 if (bp->wol & MACB_WOL_ENABLED) {
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004727 spin_lock_irqsave(&bp->lock, flags);
4728 /* Disable WoL */
4729 if (macb_is_gem(bp)) {
4730 queue_writel(bp->queues, IDR, GEM_BIT(WOL));
4731 gem_writel(bp, WOL, 0);
4732 } else {
4733 queue_writel(bp->queues, IDR, MACB_BIT(WOL));
4734 macb_writel(bp, WOL, 0);
4735 }
4736 /* Clear ISR on queue 0 */
4737 queue_readl(bp->queues, ISR);
4738 if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
4739 queue_writel(bp->queues, ISR, -1);
4740 /* Replace interrupt handler on queue 0 */
4741 devm_free_irq(dev, bp->queues[0].irq, bp->queues);
4742 err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
4743 IRQF_SHARED, netdev->name, bp->queues);
4744 if (err) {
4745 dev_err(dev,
4746 "Unable to request IRQ %d (error %d)\n",
4747 bp->queues[0].irq, err);
4748 spin_unlock_irqrestore(&bp->lock, flags);
4749 return err;
4750 }
4751 spin_unlock_irqrestore(&bp->lock, flags);
4752
Harini Katakamd54f89a2019-03-01 16:20:34 +05304753 disable_irq_wake(bp->queues[0].irq);
Claudiu Bezneac1e85c6c2019-05-22 08:24:43 +00004754
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004755		/* Now make sure we disable the PHY before moving
4756		 * to the common restore path
4757 */
Antoine Tenart7897b072019-11-13 10:00:06 +01004758 rtnl_lock();
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004759 phylink_stop(bp->phylink);
Antoine Tenart7897b072019-11-13 10:00:06 +01004760 rtnl_unlock();
Harini Katakamd54f89a2019-03-01 16:20:34 +05304761 }
4762
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004763 for (q = 0, queue = bp->queues; q < bp->num_queues;
4764 ++q, ++queue)
4765 napi_enable(&queue->napi);
4766
4767 if (netdev->hw_features & NETIF_F_NTUPLE)
4768 gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
4769
4770 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
4771 macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
4772
4773 macb_writel(bp, NCR, MACB_BIT(MPE));
Harini Katakamde991c52019-03-01 16:20:35 +05304774 macb_init_hw(bp);
4775 macb_set_rx_mode(netdev);
Claudiu Bezneac1e85c6c2019-05-22 08:24:43 +00004776 macb_restore_features(bp);
Nicolas Ferre558e35c2020-07-20 10:56:52 +02004777 rtnl_lock();
4778 phylink_start(bp->phylink);
4779 rtnl_unlock();
4780
Harini Katakamd54f89a2019-03-01 16:20:34 +05304781 netif_device_attach(netdev);
Harini Katakamde991c52019-03-01 16:20:35 +05304782 if (bp->ptp_info)
4783 bp->ptp_info->ptp_init(netdev);
Harini Katakamd54f89a2019-03-01 16:20:34 +05304784
4785 return 0;
4786}
4787
4788static int __maybe_unused macb_runtime_suspend(struct device *dev)
4789{
Wolfram Sangf9cb7592019-03-19 17:36:34 +01004790 struct net_device *netdev = dev_get_drvdata(dev);
Harini Katakamd54f89a2019-03-01 16:20:34 +05304791 struct macb *bp = netdev_priv(netdev);
4792
Nicolas Ferre515a10a2020-07-10 14:46:41 +02004793 if (!(device_may_wakeup(dev))) {
Sergio Prado3e2a5e12016-02-09 12:07:16 -02004794 clk_disable_unprepare(bp->tx_clk);
4795 clk_disable_unprepare(bp->hclk);
4796 clk_disable_unprepare(bp->pclk);
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05304797 clk_disable_unprepare(bp->rx_clk);
Sergio Prado3e2a5e12016-02-09 12:07:16 -02004798 }
Harini Katakamf5473d12019-03-01 16:20:33 +05304799 clk_disable_unprepare(bp->tsu_clk);
Haavard Skinnemoenc1f598f2008-03-04 13:39:29 +01004800
4801 return 0;
4802}
4803
Harini Katakamd54f89a2019-03-01 16:20:34 +05304804static int __maybe_unused macb_runtime_resume(struct device *dev)
Haavard Skinnemoenc1f598f2008-03-04 13:39:29 +01004805{
Wolfram Sangf9cb7592019-03-19 17:36:34 +01004806 struct net_device *netdev = dev_get_drvdata(dev);
Haavard Skinnemoenc1f598f2008-03-04 13:39:29 +01004807 struct macb *bp = netdev_priv(netdev);
4808
Nicolas Ferre515a10a2020-07-10 14:46:41 +02004809 if (!(device_may_wakeup(dev))) {
Sergio Prado3e2a5e12016-02-09 12:07:16 -02004810 clk_prepare_enable(bp->pclk);
4811 clk_prepare_enable(bp->hclk);
4812 clk_prepare_enable(bp->tx_clk);
shubhrajyoti.datta@xilinx.comaead88b2016-08-16 10:14:50 +05304813 clk_prepare_enable(bp->rx_clk);
Sergio Prado3e2a5e12016-02-09 12:07:16 -02004814 }
Harini Katakamf5473d12019-03-01 16:20:33 +05304815 clk_prepare_enable(bp->tsu_clk);
Haavard Skinnemoenc1f598f2008-03-04 13:39:29 +01004816
Haavard Skinnemoenc1f598f2008-03-04 13:39:29 +01004817 return 0;
4818}
Haavard Skinnemoenc1f598f2008-03-04 13:39:29 +01004819
Harini Katakamd54f89a2019-03-01 16:20:34 +05304820static const struct dev_pm_ops macb_pm_ops = {
4821 SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
4822 SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
4823};
Soren Brinkmann0dfc3e12013-12-10 16:07:19 -08004824
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004825static struct platform_driver macb_driver = {
Nicolae Rosia9e86d7662015-01-22 17:31:05 +00004826 .probe = macb_probe,
4827 .remove = macb_remove,
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004828 .driver = {
4829 .name = "macb",
Jean-Christophe PLAGNIOL-VILLARDfb97a842011-11-18 15:29:25 +01004830 .of_match_table = of_match_ptr(macb_dt_ids),
Soren Brinkmann0dfc3e12013-12-10 16:07:19 -08004831 .pm = &macb_pm_ops,
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004832 },
4833};
4834
Nicolae Rosia9e86d7662015-01-22 17:31:05 +00004835module_platform_driver(macb_driver);
Haavard Skinnemoen89e57852006-11-09 14:51:17 +01004836
4837MODULE_LICENSE("GPL");
Jamie Ilesf75ba502011-11-08 10:12:32 +00004838MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
Jean Delvaree05503e2011-05-18 16:49:24 +02004839MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
Kay Sievers72abb462008-04-18 13:50:44 -07004840MODULE_ALIAS("platform:macb");