// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>

#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#include <net/pkt_sched.h>

#define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
			 NETIF_MSG_RX_STATUS)

#define cpsw_info(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_info(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_err(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_err(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_dbg(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_notice(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define ALE_ALL_PORTS		0x7

#define CPSW_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
#define CPSW_MINOR_VERSION(reg)		(reg & 0xff)
#define CPSW_RTL_VERSION(reg)		((reg >> 11) & 0x1f)

#define CPSW_VERSION_1		0x19010a
#define CPSW_VERSION_2		0x19010c
#define CPSW_VERSION_3		0x19010f
#define CPSW_VERSION_4		0x190112

#define HOST_PORT_NUM		0
#define CPSW_ALE_PORTS_NUM	3
#define SLIVER_SIZE		0x40

#define CPSW1_HOST_PORT_OFFSET	0x028
#define CPSW1_SLAVE_OFFSET	0x050
#define CPSW1_SLAVE_SIZE	0x040
#define CPSW1_CPDMA_OFFSET	0x100
#define CPSW1_STATERAM_OFFSET	0x200
#define CPSW1_HW_STATS		0x400
#define CPSW1_CPTS_OFFSET	0x500
#define CPSW1_ALE_OFFSET	0x600
#define CPSW1_SLIVER_OFFSET	0x700

#define CPSW2_HOST_PORT_OFFSET	0x108
#define CPSW2_SLAVE_OFFSET	0x200
#define CPSW2_SLAVE_SIZE	0x100
#define CPSW2_CPDMA_OFFSET	0x800
#define CPSW2_HW_STATS		0x900
#define CPSW2_STATERAM_OFFSET	0xa00
#define CPSW2_CPTS_OFFSET	0xc00
#define CPSW2_ALE_OFFSET	0xd00
#define CPSW2_SLIVER_OFFSET	0xd80
#define CPSW2_BD_OFFSET		0x2000

#define CPDMA_RXTHRESH		0x0c0
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP		0x00
#define CPDMA_RXHDP		0x20
#define CPDMA_TXCP		0x40
#define CPDMA_RXCP		0x60

#define CPSW_POLL_WEIGHT	64
#define CPSW_RX_VLAN_ENCAP_HDR_SIZE		4
#define CPSW_MIN_PACKET_SIZE	(VLAN_ETH_ZLEN)
#define CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN +\
				 ETH_FCS_LEN +\
				 CPSW_RX_VLAN_ENCAP_HDR_SIZE)

#define RX_PRIORITY_MAPPING	0x76543210
#define TX_PRIORITY_MAPPING	0x33221100
#define CPDMA_TX_PRIORITY_MAP	0x76543210

#define CPSW_VLAN_AWARE		BIT(1)
#define CPSW_RX_VLAN_ENCAP	BIT(2)
#define CPSW_ALE_VLAN_AWARE	1

#define CPSW_FIFO_NORMAL_MODE		(0 << 16)
#define CPSW_FIFO_DUAL_MAC_MODE		(1 << 16)
#define CPSW_FIFO_RATE_LIMIT_MODE	(2 << 16)

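/*
 * Interrupt pacing limits used by cpsw_get_coalesce()/cpsw_set_coalesce():
 * the rx_coalesce_usecs value requested via ethtool is clamped to the
 * CPSW_CMINTMIN_INTVL..CPSW_CMINTMAX_INTVL window before being converted
 * into a pacing count, and the prescaler itself is derived from the bus
 * frequency at run time (see cpsw_set_coalesce() below).
 */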
#define CPSW_INTPACEEN		(0x3f << 16)
#define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
#define CPSW_CMINTMAX_CNT	63
#define CPSW_CMINTMIN_CNT	2
#define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1)

#define cpsw_slave_index(cpsw, priv)				\
		((cpsw->data.dual_emac) ? priv->emac_port :	\
		cpsw->data.active_slave)
#define IRQ_NUM			2
#define CPSW_MAX_QUEUES		8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
#define CPSW_FIFO_QUEUE_TYPE_SHIFT	16
#define CPSW_FIFO_SHAPE_EN_SHIFT	16
#define CPSW_FIFO_RATE_EN_SHIFT		20
#define CPSW_TC_NUM			4
#define CPSW_FIFO_SHAPERS_NUM		(CPSW_TC_NUM - 1)
#define CPSW_PCT_MASK			0x7f

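/*
 * Layout of the 32-bit RX VLAN encapsulation word that the switch prepends
 * to received frames when CPSW_RX_VLAN_ENCAP is enabled (parsed in
 * cpsw_rx_vlan_encap() below): bits 31:29 carry the priority, bits 27:16
 * the VLAN id, and bits 9:8 the packet type from the enum that follows.
 */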
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT	29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK		GENMASK(2, 0)
#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT	16
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT	8
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK	GENMASK(1, 0)
enum {
	CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
};

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");

struct cpsw_wr_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;
	u32	rx_thresh_en;
	u32	rx_en;
	u32	tx_en;
	u32	misc_en;
	u32	mem_allign1[8];
	u32	rx_thresh_stat;
	u32	rx_stat;
	u32	tx_stat;
	u32	misc_stat;
	u32	mem_allign2[8];
	u32	rx_imax;
	u32	tx_imax;

};

struct cpsw_ss_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	vlan_ltype;
	u32	ts_ltype;
	u32	dlr_ltype;
};

/* CPSW_PORT_V1 */
#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */

/* CPSW_PORT_V2 */
#define CPSW2_CONTROL       0x00 /* Control Register */
#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */

/* CPSW_PORT_V1 and V2 */
#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */

/* CPSW_PORT_V2 only */
#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */

/* Bit definitions for the CPSW2_CONTROL register */
#define PASS_PRI_TAGGED     BIT(24) /* Pass Priority Tagged */
#define VLAN_LTYPE2_EN      BIT(21) /* VLAN LTYPE 2 enable */
#define VLAN_LTYPE1_EN      BIT(20) /* VLAN LTYPE 1 enable */
#define DSCP_PRI_EN         BIT(16) /* DSCP Priority Enable */
#define TS_107              BIT(15) /* Time Sync Dest IP Address 107 */
#define TS_320              BIT(14) /* Time Sync Dest Port 320 enable */
#define TS_319              BIT(13) /* Time Sync Dest Port 319 enable */
#define TS_132              BIT(12) /* Time Sync Dest IP Addr 132 enable */
#define TS_131              BIT(11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130              BIT(10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129              BIT(9)  /* Time Sync Dest IP Addr 129 enable */
#define TS_TTL_NONZERO      BIT(8)  /* Time Sync Time To Live Non-zero enable */
#define TS_ANNEX_F_EN       BIT(6)  /* Time Sync Annex F enable */
#define TS_ANNEX_D_EN       BIT(4)  /* Time Sync Annex D enable */
#define TS_LTYPE2_EN        BIT(3)  /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN        BIT(2)  /* Time Sync LTYPE 1 enable */
#define TS_TX_EN            BIT(1)  /* Time Sync Transmit Enable */
#define TS_RX_EN            BIT(0)  /* Time Sync Receive Enable */

#define CTRL_V2_TS_BITS \
	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN)

#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
#define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)


#define CTRL_V3_TS_BITS \
	(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
	 TS_LTYPE1_EN | VLAN_LTYPE1_EN)

#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
#define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)

/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
#define TS_SEQ_ID_OFFSET_MASK    (0x3f)
#define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
#define TS_MSG_TYPE_EN_MASK      (0xffff)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))

/* Bit definitions for the CPSW1_TS_CTL register */
#define CPSW_V1_TS_RX_EN		BIT(0)
#define CPSW_V1_TS_TX_EN		BIT(4)
#define CPSW_V1_MSG_TYPE_OFS		16

/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT	16

#define CPSW_MAX_BLKS_TX		15
#define CPSW_MAX_BLKS_TX_SHIFT		4
#define CPSW_MAX_BLKS_RX		5

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	tx_in_ctl;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};

struct cpsw_slave_data {
	struct device_node *phy_node;
	char		phy_id[MII_BUS_ID_SIZE];
	int		phy_if;
	u8		mac_addr[ETH_ALEN];
	u16		dual_emac_res_vlan;	/* Reserved VLAN for DualEMAC */
	struct phy	*ifphy;
};

struct cpsw_platform_data {
	struct cpsw_slave_data	*slave_data;
	u32	ss_reg_ofs;	/* Subsystem control register offset */
	u32	channels;	/* number of cpdma channels (symmetric) */
	u32	slaves;		/* number of slave cpgmac ports */
	u32	active_slave;	/* time stamping, ethtool and SIOCGMIIPHY slave */
	u32	ale_entries;	/* ale table size */
	u32	bd_ram_size;	/* buffer descriptor ram size */
	u32	mac_control;	/* Mac control register */
	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode */
	bool	dual_emac;	/* Enable Dual EMAC mode */
};

struct cpsw_slave {
	void __iomem			*regs;
	struct cpsw_sliver_regs __iomem	*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
	struct phy_device		*phy;
	struct net_device		*ndev;
	u32				port_vlan;
};

static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
	return readl_relaxed(slave->regs + offset);
}

static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
	writel_relaxed(val, slave->regs + offset);
}

struct cpsw_vector {
	struct cpdma_chan *ch;
	int budget;
};

struct cpsw_common {
	struct device			*dev;
	struct cpsw_platform_data	data;
	struct napi_struct		napi_rx;
	struct napi_struct		napi_tx;
	struct cpsw_ss_regs __iomem	*regs;
	struct cpsw_wr_regs __iomem	*wr_regs;
	u8 __iomem			*hw_stats;
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				version;
	u32				coal_intvl;
	u32				bus_freq_mhz;
	int				rx_packet_max;
	struct cpsw_slave		*slaves;
	struct cpdma_ctlr		*dma;
	struct cpsw_vector		txv[CPSW_MAX_QUEUES];
	struct cpsw_vector		rxv[CPSW_MAX_QUEUES];
	struct cpsw_ale			*ale;
	bool				quirk_irq;
	bool				rx_irq_disabled;
	bool				tx_irq_disabled;
	u32				irqs_table[IRQ_NUM];
	struct cpts			*cpts;
	int				rx_ch_num, tx_ch_num;
	int				speed;
	int				usage_count;
};

struct cpsw_priv {
	struct net_device		*ndev;
	struct device			*dev;
	u32				msg_enable;
	u8				mac_addr[ETH_ALEN];
	bool				rx_pause;
	bool				tx_pause;
	bool				mqprio_hw;
	int				fifo_bw[CPSW_TC_NUM];
	int				shp_cfg_speed;
	int				tx_ts_enabled;
	int				rx_ts_enabled;
	u32				emac_port;
	struct cpsw_common		*cpsw;
};

struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};

#define CPSW_STAT(m)		CPSW_STATS,				\
				FIELD_SIZEOF(struct cpsw_hw_stats, m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS,				   \
				FIELD_SIZEOF(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS,				   \
				FIELD_SIZEOF(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)

static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};

static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "misqueued", CPDMA_RX_STAT(misqueued) },
	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "requeue", CPDMA_RX_STAT(requeue) },
	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};

#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)

#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
#define napi_to_cpsw(napi)	container_of(napi, struct cpsw_common, napi)
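/*
 * Iterate over the slave ports that belong to a cpsw_priv instance: in dual
 * EMAC mode only the port owned by this netdev is visited, otherwise the
 * callback runs for every slave of the shared switch.
 */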
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
					slave = cpsw->slaves;		\
					n; n--)				\
				(func)(slave++, ##arg);			\
	} while (0)

static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
				    __be16 proto, u16 vid);

static inline int cpsw_get_slave_port(u32 slave_num)
{
	return slave_num + 1;
}

static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface will be
		 * common for both the interfaces, as the interfaces share
		 * the same hardware resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
			__hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}

struct addr_sync_ctx {
	struct net_device *ndev;
	const u8 *addr;		/* address to be synched */
	int consumed;		/* number of address instances */
	int flush;		/* flush flag */
};

/**
 * cpsw_set_mc - add the multicast entry to the table if it is not there yet,
 * or delete it if it is no longer needed
 * @ndev: device to sync
 * @addr: address to be added or deleted
 * @vid: vlan id, if vid < 0 set/unset address for real device
 * @add: add address if the flag is set or remove otherwise
 */
static int cpsw_set_mc(struct net_device *ndev, const u8 *addr,
		       int vid, int add)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int mask, flags, ret;

	if (vid < 0) {
		if (cpsw->data.dual_emac)
			vid = cpsw->slaves[priv->emac_port].port_vlan;
		else
			vid = 0;
	}

	mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS;
	flags = vid ? ALE_VLAN : 0;

	if (add)
		ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0);
	else
		ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);

	return ret;
}

static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0, ret = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (found)
		sync_ctx->consumed++;

	if (sync_ctx->flush) {
		if (!found)
			cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
		return 0;
	}

	if (found)
		ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1);

	return ret;
}

static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;
	int ret;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 0;

	ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num && !ret)
		ret = cpsw_set_mc(ndev, addr, -1, 1);

	return ret;
}

static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.consumed = 0;
	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.flush = 1;

	vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed == num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx)
{
	struct addr_sync_ctx *sync_ctx = ctx;
	struct netdev_hw_addr *ha;
	int found = 0;

	if (!vdev || !(vdev->flags & IFF_UP))
		return 0;

	/* vlan address is relevant if its sync_cnt != 0 */
	netdev_for_each_mc_addr(ha, vdev) {
		if (ether_addr_equal(ha->addr, sync_ctx->addr)) {
			found = ha->sync_cnt;
			break;
		}
	}

	if (!found)
		return 0;

	sync_ctx->consumed++;
	cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0);
	return 0;
}

static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num)
{
	struct addr_sync_ctx sync_ctx;

	sync_ctx.addr = addr;
	sync_ctx.ndev = ndev;
	sync_ctx.consumed = 0;

	vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx);
	if (sync_ctx.consumed < num)
		cpsw_set_mc(ndev, addr, -1, 0);

	return 0;
}

static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
		return;
	} else {
		/* Disable promiscuous mode */
		cpsw_set_promiscious(ndev, false);
	}

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);

	/* add/remove mcast address either for real netdev or for vlan */
	__hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr,
			       cpsw_del_mc_addr);
}

static void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
	return;
}

static void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
	return;
}

static void cpsw_tx_handler(void *token, int len, int status)
{
	struct netdev_queue	*txq;
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	/* Check whether the queue is stopped due to stalled tx dma, if the
	 * queue is stopped then start the queue as we have free desc for tx
	 */
	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	cpts_tx_timestamp(cpsw->cpts, skb);
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}

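/*
 * When CPSW_RX_VLAN_ENCAP is enabled the switch prepends a 4-byte word
 * describing the VLAN context of each received frame. Strip that word,
 * and for plainly VLAN-tagged packets also pop the tag into the skb's
 * hwaccel VLAN fields so the stack sees an untagged frame.
 */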
static void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	struct cpsw_common *cpsw = priv->cpsw;
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & Priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;
	/* Ignore default vlans in dual mac mode */
	if (cpsw->data.dual_emac &&
	    vid == cpsw->slaves[priv->emac_port].port_vlan)
		return;

	prio = (rx_vlan_encap_hdr >>
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

	vtag = (prio << VLAN_PRIO_SHIFT) | vid;
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}

static void cpsw_rx_handler(void *token, int len, int status)
{
	struct cpdma_chan	*ch;
	struct sk_buff		*skb = token;
	struct sk_buff		*new_skb;
	struct net_device	*ndev = skb->dev;
	int			ret = 0, port;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
	struct cpsw_priv	*priv;

	if (cpsw->data.dual_emac) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port) {
			ndev = cpsw->slaves[--port].ndev;
			skb->dev = ndev;
		}
	}

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->data.dual_emac && cpsw->usage_count &&
		    (status >= 0)) {
			/* The packet received is for the interface which
			 * is already down and the other interface is up
			 * and running, instead of freeing which results
			 * in reducing of the number of rx descriptor in
			 * DMA engine, requeue skb back to cpdma.
			 */
			new_skb = skb;
			goto requeue;
		}

		/* the interface is going down, skbs are purged */
		dev_kfree_skb_any(skb);
		return;
	}

	new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
	if (new_skb) {
		skb_copy_queue_mapping(new_skb, skb);
		skb_put(skb, len);
		if (status & CPDMA_RX_VLAN_ENCAP)
			cpsw_rx_vlan_encap(skb);
		priv = netdev_priv(ndev);
		if (priv->rx_ts_enabled)
			cpts_rx_timestamp(cpsw->cpts, skb);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		ndev->stats.rx_bytes += len;
		ndev->stats.rx_packets++;
		kmemleak_not_leak(new_skb);
	} else {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

requeue:
	if (netif_dormant(ndev)) {
		dev_kfree_skb_any(new_skb);
		return;
	}

	ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
				skb_tailroom(new_skb), 0);
	if (WARN_ON(ret < 0))
		dev_kfree_skb_any(new_skb);
}

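/*
 * Split the NAPI budget (CPSW_POLL_WEIGHT) and cpdma weights across the tx
 * channels: rate-limited channels get a share proportional to their
 * configured rate against the (estimated) total link rate, the remaining
 * channels split what is left over evenly, and any rounding remainder goes
 * to the channel with the highest rate. The rx budget is divided evenly.
 */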
static void cpsw_split_res(struct cpsw_common *cpsw)
{
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link speed,
		 * split proportionally according next potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = CPSW_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}

static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
	writel(0, &cpsw->wr_regs->rx_en);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	u32			ch_map;
	int			num_tx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	struct cpsw_vector	*txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	u32			ch_map;
	int			num_rx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	struct cpsw_vector	*rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

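/*
 * Per-port FIFO shaper state lives in the ss_regs ptype register: each slave
 * has a 3-bit enable field at CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave_num, one
 * bit per shaped FIFO queue. The helpers below test and toggle those bits.
 */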
Ivan Khoronzhuk57d90142018-07-24 00:26:32 +03001202static bool cpsw_shp_is_off(struct cpsw_priv *priv)
1203{
1204 struct cpsw_common *cpsw = priv->cpsw;
1205 struct cpsw_slave *slave;
1206 u32 shift, mask, val;
1207
1208 val = readl_relaxed(&cpsw->regs->ptype);
1209
1210 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1211 shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
1212 mask = 7 << shift;
1213 val = val & mask;
1214
1215 return !val;
1216}
1217
1218static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
1219{
1220 struct cpsw_common *cpsw = priv->cpsw;
1221 struct cpsw_slave *slave;
1222 u32 shift, mask, val;
1223
1224 val = readl_relaxed(&cpsw->regs->ptype);
1225
1226 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1227 shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
1228 mask = (1 << --fifo) << shift;
1229 val = on ? val | mask : val & ~mask;
1230
1231 writel_relaxed(val, &cpsw->regs->ptype);
1232}
1233
Mugunthan V Ndf828592012-03-18 20:17:54 +00001234static void _cpsw_adjust_link(struct cpsw_slave *slave,
1235 struct cpsw_priv *priv, bool *link)
1236{
1237 struct phy_device *phy = slave->phy;
1238 u32 mac_control = 0;
1239 u32 slave_port;
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001240 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V Ndf828592012-03-18 20:17:54 +00001241
1242 if (!phy)
1243 return;
1244
Ivan Khoronzhuk6f1f5832016-08-10 02:22:34 +03001245 slave_port = cpsw_get_slave_port(slave->slave_num);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001246
1247 if (phy->link) {
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001248 mac_control = cpsw->data.mac_control;
Mugunthan V Ndf828592012-03-18 20:17:54 +00001249
1250 /* enable forwarding */
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001251 cpsw_ale_control_set(cpsw->ale, slave_port,
Mugunthan V Ndf828592012-03-18 20:17:54 +00001252 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1253
1254 if (phy->speed == 1000)
1255 mac_control |= BIT(7); /* GIGABITEN */
1256 if (phy->duplex)
1257 mac_control |= BIT(0); /* FULLDUPLEXEN */
Daniel Mack342b7b72012-09-27 09:19:34 +00001258
1259 /* set speed_in input in case RMII mode is used in 100Mbps */
1260 if (phy->speed == 100)
1261 mac_control |= BIT(15);
SZ Lin (林上智)f9db5062018-03-16 00:56:01 +08001262 /* in band mode only works in 10Mbps RGMII mode */
1263 else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
Mugunthan V Na81d8762013-12-13 18:42:55 +05301264 mac_control |= BIT(18); /* In Band mode */
Daniel Mack342b7b72012-09-27 09:19:34 +00001265
Mugunthan V N1923d6e2014-09-08 22:54:02 +05301266 if (priv->rx_pause)
1267 mac_control |= BIT(3);
1268
1269 if (priv->tx_pause)
1270 mac_control |= BIT(4);
1271
Mugunthan V Ndf828592012-03-18 20:17:54 +00001272 *link = true;
Ivan Khoronzhuk57d90142018-07-24 00:26:32 +03001273
1274 if (priv->shp_cfg_speed &&
1275 priv->shp_cfg_speed != slave->phy->speed &&
1276 !cpsw_shp_is_off(priv))
1277 dev_warn(priv->dev,
1278 "Speed was changed, CBS shaper speeds are changed!");
Mugunthan V Ndf828592012-03-18 20:17:54 +00001279 } else {
1280 mac_control = 0;
1281 /* disable forwarding */
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001282 cpsw_ale_control_set(cpsw->ale, slave_port,
Mugunthan V Ndf828592012-03-18 20:17:54 +00001283 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1284 }
1285
1286 if (mac_control != slave->mac_control) {
1287 phy_print_status(phy);
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06001288 writel_relaxed(mac_control, &slave->sliver->mac_control);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001289 }
1290
1291 slave->mac_control = mac_control;
1292}
1293
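/* Aggregate link speed across all slave ports that currently have link */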
Ivan Khoronzhuk0be01b82016-12-10 14:23:49 +02001294static int cpsw_get_common_speed(struct cpsw_common *cpsw)
1295{
1296 int i, speed;
1297
1298 for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
1299 if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
1300 speed += cpsw->slaves[i].phy->speed;
1301
1302 return speed;
1303}
1304
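/* A re-split of the tx budget is needed only when the aggregate link
 * speed has changed and only some (not all) of the tx channels carry a
 * fixed rate.
 */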
1305static int cpsw_need_resplit(struct cpsw_common *cpsw)
1306{
1307 int i, rlim_ch_num;
1308 int speed, ch_rate;
1309
1310	/* re-split resources only if the speed has changed */
1311 speed = cpsw_get_common_speed(cpsw);
1312 if (speed == cpsw->speed || !speed)
1313 return 0;
1314
1315 cpsw->speed = speed;
1316
1317 for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
1318 ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
1319 if (!ch_rate)
1320 break;
1321
1322 rlim_ch_num++;
1323 }
1324
1325 /* cases not dependent on speed */
1326 if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
1327 return 0;
1328
1329 return 1;
1330}
1331
Mugunthan V Ndf828592012-03-18 20:17:54 +00001332static void cpsw_adjust_link(struct net_device *ndev)
1333{
1334 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk0be01b82016-12-10 14:23:49 +02001335 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V Ndf828592012-03-18 20:17:54 +00001336 bool link = false;
1337
1338 for_each_slave(priv, _cpsw_adjust_link, priv, &link);
1339
1340 if (link) {
Ivan Khoronzhuk0be01b82016-12-10 14:23:49 +02001341 if (cpsw_need_resplit(cpsw))
Grygorii Strashko9763a892019-04-26 20:12:26 +03001342 cpsw_split_res(cpsw);
Ivan Khoronzhuk0be01b82016-12-10 14:23:49 +02001343
Mugunthan V Ndf828592012-03-18 20:17:54 +00001344 netif_carrier_on(ndev);
1345 if (netif_running(ndev))
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001346 netif_tx_wake_all_queues(ndev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001347 } else {
1348 netif_carrier_off(ndev);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001349 netif_tx_stop_all_queues(ndev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001350 }
1351}
1352
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001353static int cpsw_get_coalesce(struct net_device *ndev,
1354 struct ethtool_coalesce *coal)
1355{
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001356 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001357
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001358 coal->rx_coalesce_usecs = cpsw->coal_intvl;
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001359 return 0;
1360}
1361
1362static int cpsw_set_coalesce(struct net_device *ndev,
1363 struct ethtool_coalesce *coal)
1364{
1365 struct cpsw_priv *priv = netdev_priv(ndev);
1366 u32 int_ctrl;
1367 u32 num_interrupts = 0;
1368 u32 prescale = 0;
1369 u32 addnl_dvdr = 1;
1370 u32 coal_intvl = 0;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001371 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001372
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001373 coal_intvl = coal->rx_coalesce_usecs;
1374
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001375 int_ctrl = readl(&cpsw->wr_regs->int_control);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001376 prescale = cpsw->bus_freq_mhz * 4;
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001377
Mugunthan V Na84bc2a2014-07-15 20:26:53 +05301378 if (!coal->rx_coalesce_usecs) {
1379 int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
1380 goto update_return;
1381 }
1382
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001383 if (coal_intvl < CPSW_CMINTMIN_INTVL)
1384 coal_intvl = CPSW_CMINTMIN_INTVL;
1385
1386 if (coal_intvl > CPSW_CMINTMAX_INTVL) {
1387		/* The interrupt pacer works with a 4us pulse; we can
1388		 * throttle further by dilating the 4us pulse.
1389		 */
1390 addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
1391
1392 if (addnl_dvdr > 1) {
1393 prescale *= addnl_dvdr;
1394 if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
1395 coal_intvl = (CPSW_CMINTMAX_INTVL
1396 * addnl_dvdr);
1397 } else {
1398 addnl_dvdr = 1;
1399 coal_intvl = CPSW_CMINTMAX_INTVL;
1400 }
1401 }
1402
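	/* Program the pacers with an interrupts-per-millisecond limit derived
	 * from the requested interval (usecs), scaled by the pulse dilation
	 * factor computed above.
	 */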
1403 num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001404 writel(num_interrupts, &cpsw->wr_regs->rx_imax);
1405 writel(num_interrupts, &cpsw->wr_regs->tx_imax);
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001406
1407 int_ctrl |= CPSW_INTPACEEN;
1408 int_ctrl &= (~CPSW_INTPRESCALE_MASK);
1409 int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
Mugunthan V Na84bc2a2014-07-15 20:26:53 +05301410
1411update_return:
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001412 writel(int_ctrl, &cpsw->wr_regs->int_control);
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001413
1414 cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001415 cpsw->coal_intvl = coal_intvl;
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001416
1417 return 0;
1418}
1419
Mugunthan V Nd9718542013-07-23 15:38:17 +05301420static int cpsw_get_sset_count(struct net_device *ndev, int sset)
1421{
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001422 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1423
Mugunthan V Nd9718542013-07-23 15:38:17 +05301424 switch (sset) {
1425 case ETH_SS_STATS:
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001426 return (CPSW_STATS_COMMON_LEN +
1427 (cpsw->rx_ch_num + cpsw->tx_ch_num) *
1428 CPSW_STATS_CH_LEN);
Mugunthan V Nd9718542013-07-23 15:38:17 +05301429 default:
1430 return -EOPNOTSUPP;
1431 }
1432}
1433
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001434static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
1435{
1436 int ch_stats_len;
1437 int line;
1438 int i;
1439
1440 ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
1441 for (i = 0; i < ch_stats_len; i++) {
1442 line = i % CPSW_STATS_CH_LEN;
1443 snprintf(*p, ETH_GSTRING_LEN,
Florian Fainellibf2ce3f2018-05-21 11:45:53 -07001444 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
1445 (long)(i / CPSW_STATS_CH_LEN),
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001446 cpsw_gstrings_ch_stats[line].stat_string);
1447 *p += ETH_GSTRING_LEN;
1448 }
1449}
1450
Mugunthan V Nd9718542013-07-23 15:38:17 +05301451static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1452{
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001453 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Mugunthan V Nd9718542013-07-23 15:38:17 +05301454 u8 *p = data;
1455 int i;
1456
1457 switch (stringset) {
1458 case ETH_SS_STATS:
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001459 for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
Mugunthan V Nd9718542013-07-23 15:38:17 +05301460 memcpy(p, cpsw_gstrings_stats[i].stat_string,
1461 ETH_GSTRING_LEN);
1462 p += ETH_GSTRING_LEN;
1463 }
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001464
1465 cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
1466 cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
Mugunthan V Nd9718542013-07-23 15:38:17 +05301467 break;
1468 }
1469}
1470
1471static void cpsw_get_ethtool_stats(struct net_device *ndev,
1472 struct ethtool_stats *stats, u64 *data)
1473{
Mugunthan V Nd9718542013-07-23 15:38:17 +05301474 u8 *p;
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03001475 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001476 struct cpdma_chan_stats ch_stats;
1477 int i, l, ch;
Mugunthan V Nd9718542013-07-23 15:38:17 +05301478
1479 /* Collect Davinci CPDMA stats for Rx and Tx Channel */
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001480 for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
1481 data[l] = readl(cpsw->hw_stats +
1482 cpsw_gstrings_stats[l].stat_offset);
Mugunthan V Nd9718542013-07-23 15:38:17 +05301483
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001484 for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02001485 cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001486 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
1487 p = (u8 *)&ch_stats +
1488 cpsw_gstrings_ch_stats[i].stat_offset;
1489 data[l] = *(u32 *)p;
1490 }
1491 }
Mugunthan V Nd9718542013-07-23 15:38:17 +05301492
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001493 for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02001494 cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001495 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
1496 p = (u8 *)&ch_stats +
1497 cpsw_gstrings_ch_stats[i].stat_offset;
1498 data[l] = *(u32 *)p;
Mugunthan V Nd9718542013-07-23 15:38:17 +05301499 }
1500 }
1501}
1502
Ivan Khoronzhuk27e9e102016-08-10 02:22:32 +03001503static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001504 struct sk_buff *skb,
1505 struct cpdma_chan *txch)
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001506{
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03001507 struct cpsw_common *cpsw = priv->cpsw;
1508
Ivan Khoronzhuk98fdd852017-06-27 16:58:51 +03001509 skb_tx_timestamp(skb);
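	/* In dual EMAC mode, direct the packet to this slave's switch port
	 * (emac_port + 1); in switch mode a directed port of 0 lets the ALE
	 * choose the egress port.
	 */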
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001510 return cpdma_chan_submit(txch, skb, skb->data, skb->len,
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001511 priv->emac_port + cpsw->data.dual_emac);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001512}
1513
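/* Default ALE entries for a slave in dual EMAC mode: program the port
 * VLAN, make the slave and host ports members of it, let broadcasts in
 * that VLAN reach only the host port, install a secure unicast entry for
 * the port MAC, and drop unknown-VLAN traffic on the slave port.
 */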
1514static inline void cpsw_add_dual_emac_def_ale_entries(
1515 struct cpsw_priv *priv, struct cpsw_slave *slave,
1516 u32 slave_port)
1517{
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001518 struct cpsw_common *cpsw = priv->cpsw;
Grygorii Strashko71a2cbb2016-04-07 15:16:44 +03001519 u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001520
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001521 if (cpsw->version == CPSW_VERSION_1)
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001522 slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
1523 else
1524 slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001525 cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001526 port_mask, port_mask, 0);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001527 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03001528 ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001529 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
1530 HOST_PORT_NUM, ALE_VLAN |
1531 ALE_SECURE, slave->port_vlan);
Grygorii Strashko5e5add12018-05-01 12:41:22 -05001532 cpsw_ale_control_set(cpsw->ale, slave_port,
1533 ALE_PORT_DROP_UNKNOWN_VLAN, 1);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001534}
1535
Daniel Mack1e7a2e22013-11-15 08:29:16 +01001536static void soft_reset_slave(struct cpsw_slave *slave)
Mugunthan V Ndf828592012-03-18 20:17:54 +00001537{
1538 char name[32];
Daniel Mack1e7a2e22013-11-15 08:29:16 +01001539
1540 snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
1541 soft_reset(name, &slave->sliver->soft_reset);
1542}
1543
1544static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1545{
Mugunthan V Ndf828592012-03-18 20:17:54 +00001546 u32 slave_port;
Sekhar Nori30c57f02017-04-03 17:34:28 +05301547 struct phy_device *phy;
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03001548 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V Ndf828592012-03-18 20:17:54 +00001549
Daniel Mack1e7a2e22013-11-15 08:29:16 +01001550 soft_reset_slave(slave);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001551
1552 /* setup priority mapping */
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06001553 writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
Richard Cochran9750a3a2012-10-29 08:45:15 +00001554
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001555 switch (cpsw->version) {
Richard Cochran9750a3a2012-10-29 08:45:15 +00001556 case CPSW_VERSION_1:
1557 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
Grygorii Strashko48f5bcc2017-05-08 14:21:21 -05001558		/* Increase RX FIFO size to 5 to support full-duplex
1559		 * flow control mode
1560		 */
1561 slave_write(slave,
1562 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
1563 CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
Richard Cochran9750a3a2012-10-29 08:45:15 +00001564 break;
1565 case CPSW_VERSION_2:
Mugunthan V Nc193f362013-08-05 17:30:05 +05301566 case CPSW_VERSION_3:
Mugunthan V N926489b2013-08-12 17:11:15 +05301567 case CPSW_VERSION_4:
Richard Cochran9750a3a2012-10-29 08:45:15 +00001568 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
Grygorii Strashko48f5bcc2017-05-08 14:21:21 -05001569		/* Increase RX FIFO size to 5 to support full-duplex
1570		 * flow control mode
1571		 */
1572 slave_write(slave,
1573 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
1574 CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
Richard Cochran9750a3a2012-10-29 08:45:15 +00001575 break;
1576 }
Mugunthan V Ndf828592012-03-18 20:17:54 +00001577
1578 /* setup max packet size, and mac address */
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06001579 writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001580 cpsw_set_slave_mac(slave, priv);
1581
1582 slave->mac_control = 0; /* no link yet */
1583
Ivan Khoronzhuk6f1f5832016-08-10 02:22:34 +03001584 slave_port = cpsw_get_slave_port(slave->slave_num);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001585
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001586 if (cpsw->data.dual_emac)
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001587 cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
1588 else
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001589 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001590 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001591
David Rivshind733f7542016-04-27 21:32:31 -04001592 if (slave->data->phy_node) {
Sekhar Nori30c57f02017-04-03 17:34:28 +05301593 phy = of_phy_connect(priv->ndev, slave->data->phy_node,
Heiko Schocher9e42f712015-10-17 06:04:35 +02001594 &cpsw_adjust_link, 0, slave->data->phy_if);
Sekhar Nori30c57f02017-04-03 17:34:28 +05301595 if (!phy) {
Rob Herringf7ce9102017-07-18 16:43:19 -05001596 dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
1597 slave->data->phy_node,
David Rivshind733f7542016-04-27 21:32:31 -04001598 slave->slave_num);
1599 return;
1600 }
1601 } else {
Sekhar Nori30c57f02017-04-03 17:34:28 +05301602 phy = phy_connect(priv->ndev, slave->data->phy_id,
Florian Fainellif9a8f832013-01-14 00:52:52 +00001603 &cpsw_adjust_link, slave->data->phy_if);
Sekhar Nori30c57f02017-04-03 17:34:28 +05301604 if (IS_ERR(phy)) {
David Rivshind733f7542016-04-27 21:32:31 -04001605 dev_err(priv->dev,
1606 "phy \"%s\" not found on slave %d, err %ld\n",
1607 slave->data->phy_id, slave->slave_num,
Sekhar Nori30c57f02017-04-03 17:34:28 +05301608 PTR_ERR(phy));
David Rivshind733f7542016-04-27 21:32:31 -04001609 return;
1610 }
Mugunthan V Ndf828592012-03-18 20:17:54 +00001611 }
David Rivshind733f7542016-04-27 21:32:31 -04001612
Sekhar Nori30c57f02017-04-03 17:34:28 +05301613 slave->phy = phy;
1614
David Rivshind733f7542016-04-27 21:32:31 -04001615 phy_attached_info(slave->phy);
1616
1617 phy_start(slave->phy);
1618
1619 /* Configure GMII_SEL register */
Grygorii Strashko3ff18842018-11-25 18:15:25 -06001620 if (!IS_ERR(slave->data->ifphy))
1621 phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
1622 slave->data->phy_if);
1623 else
1624 cpsw_phy_sel(cpsw->dev, slave->phy->interface,
1625 slave->slave_num);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001626}
1627
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001628static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
1629{
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001630 struct cpsw_common *cpsw = priv->cpsw;
1631 const int vlan = cpsw->data.default_vlan;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001632 u32 reg;
1633 int i;
Lennart Sorensen1e5c4bc2014-10-31 13:38:52 -04001634 int unreg_mcast_mask;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001635
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001636 reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001637 CPSW2_PORT_VLAN;
1638
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001639 writel(vlan, &cpsw->host_port_regs->port_vlan);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001640
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001641 for (i = 0; i < cpsw->data.slaves; i++)
1642 slave_write(cpsw->slaves + i, vlan, reg);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001643
Lennart Sorensen1e5c4bc2014-10-31 13:38:52 -04001644 if (priv->ndev->flags & IFF_ALLMULTI)
1645 unreg_mcast_mask = ALE_ALL_PORTS;
1646 else
1647 unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
1648
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001649 cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
Grygorii Strashko61f1cef2016-04-07 15:16:43 +03001650 ALE_ALL_PORTS, ALE_ALL_PORTS,
1651 unreg_mcast_mask);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001652}
1653
Mugunthan V Ndf828592012-03-18 20:17:54 +00001654static void cpsw_init_host_port(struct cpsw_priv *priv)
1655{
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001656 u32 fifo_mode;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001657 u32 control_reg;
1658 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001659
Mugunthan V Ndf828592012-03-18 20:17:54 +00001660 /* soft reset the controller and initialize ale */
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001661 soft_reset("cpsw", &cpsw->regs->soft_reset);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001662 cpsw_ale_start(cpsw->ale);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001663
1664 /* switch to vlan unaware mode */
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001665 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001666 CPSW_ALE_VLAN_AWARE);
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001667 control_reg = readl(&cpsw->regs->control);
Grygorii Strashkoa3a41d22018-03-15 15:15:50 -05001668 control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001669 writel(control_reg, &cpsw->regs->control);
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001670 fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001671 CPSW_FIFO_NORMAL_MODE;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001672 writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001673
1674 /* setup host port priority mapping */
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06001675 writel_relaxed(CPDMA_TX_PRIORITY_MAP,
1676 &cpsw->host_port_regs->cpdma_tx_pri_map);
1677 writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001678
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001679 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
Mugunthan V Ndf828592012-03-18 20:17:54 +00001680 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1681
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001682 if (!cpsw->data.dual_emac) {
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001683 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001684 0, 0);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001685 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
Grygorii Strashko71a2cbb2016-04-07 15:16:44 +03001686 ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001687 }
Mugunthan V Ndf828592012-03-18 20:17:54 +00001688}
1689
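/* Pre-fill every rx channel with receive buffers: one skb is allocated
 * and submitted per free descriptor of the channel.
 */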
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001690static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
1691{
1692 struct cpsw_common *cpsw = priv->cpsw;
1693 struct sk_buff *skb;
1694 int ch_buf_num;
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001695 int ch, i, ret;
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001696
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001697 for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02001698 ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001699 for (i = 0; i < ch_buf_num; i++) {
1700 skb = __netdev_alloc_skb_ip_align(priv->ndev,
1701 cpsw->rx_packet_max,
1702 GFP_KERNEL);
1703 if (!skb) {
1704 cpsw_err(priv, ifup, "cannot allocate skb\n");
1705 return -ENOMEM;
1706 }
1707
1708 skb_set_queue_mapping(skb, ch);
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02001709 ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
1710 skb->data, skb_tailroom(skb),
1711 0);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001712 if (ret < 0) {
1713 cpsw_err(priv, ifup,
1714 "cannot submit skb to channel %d rx, error %d\n",
1715 ch, ret);
1716 kfree_skb(skb);
1717 return ret;
1718 }
1719 kmemleak_not_leak(skb);
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001720 }
1721
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001722 cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
1723 ch, ch_buf_num);
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001724 }
1725
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001726 return 0;
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001727}
1728
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001729static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
Sebastian Siewioraacebbf2013-04-23 07:31:36 +00001730{
Schuyler Patton3995d262014-03-03 16:19:06 +05301731 u32 slave_port;
1732
Ivan Khoronzhuk6f1f5832016-08-10 02:22:34 +03001733 slave_port = cpsw_get_slave_port(slave->slave_num);
Schuyler Patton3995d262014-03-03 16:19:06 +05301734
Sebastian Siewioraacebbf2013-04-23 07:31:36 +00001735 if (!slave->phy)
1736 return;
1737 phy_stop(slave->phy);
1738 phy_disconnect(slave->phy);
1739 slave->phy = NULL;
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001740 cpsw_ale_control_set(cpsw->ale, slave_port,
Schuyler Patton3995d262014-03-03 16:19:06 +05301741 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
Grygorii Strashko1f95ba02016-06-24 21:23:41 +03001742 soft_reset_slave(slave);
Sebastian Siewioraacebbf2013-04-23 07:31:36 +00001743}
1744
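/* Map a traffic class to a priority FIFO: the last (lowest priority)
 * class uses FIFO 0, which cannot be shaped; the remaining classes take
 * the shaper FIFOs from the top down, so class 0 gets the highest FIFO.
 */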
Ivan Khoronzhuk7929a662018-07-24 00:26:31 +03001745static int cpsw_tc_to_fifo(int tc, int num_tc)
1746{
1747 if (tc == num_tc - 1)
1748 return 0;
1749
1750 return CPSW_FIFO_SHAPERS_NUM - tc;
1751}
1752
Ivan Khoronzhuk57d90142018-07-24 00:26:32 +03001753static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
1754{
1755 struct cpsw_common *cpsw = priv->cpsw;
1756 u32 val = 0, send_pct, shift;
1757 struct cpsw_slave *slave;
1758 int pct = 0, i;
1759
1760 if (bw > priv->shp_cfg_speed * 1000)
1761 goto err;
1762
1763	/* Shaping has to stay enabled contiguously for the highest FIFOs,
1764	 * and the FIFO bandwidth must not exceed what the interface allows.
1765	 */
1766 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1767 send_pct = slave_read(slave, SEND_PERCENT);
1768 for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
1769 if (!bw) {
1770 if (i >= fifo || !priv->fifo_bw[i])
1771 continue;
1772
1773 dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
1774 continue;
1775 }
1776
1777 if (!priv->fifo_bw[i] && i > fifo) {
1778 dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
1779 return -EINVAL;
1780 }
1781
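		/* FIFO i owns the 8-bit percentage field at bits (i - 1) * 8
		 * of the SEND_PERCENT register
		 */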
1782 shift = (i - 1) * 8;
1783 if (i == fifo) {
1784 send_pct &= ~(CPSW_PCT_MASK << shift);
1785 val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
1786 if (!val)
1787 val = 1;
1788
1789 send_pct |= val << shift;
1790 pct += val;
1791 continue;
1792 }
1793
1794 if (priv->fifo_bw[i])
1795 pct += (send_pct >> shift) & CPSW_PCT_MASK;
1796 }
1797
1798 if (pct >= 100)
1799 goto err;
1800
1801 slave_write(slave, send_pct, SEND_PERCENT);
1802 priv->fifo_bw[fifo] = bw;
1803
1804 dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
1805 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
1806
1807 return 0;
1808err:
1809 dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
1810 return -EINVAL;
1811}
1812
1813static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
1814{
1815 struct cpsw_common *cpsw = priv->cpsw;
1816 struct cpsw_slave *slave;
1817 u32 tx_in_ctl_rg, val;
1818 int ret;
1819
1820 ret = cpsw_set_fifo_bw(priv, fifo, bw);
1821 if (ret)
1822 return ret;
1823
1824 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1825 tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
1826 CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
1827
1828 if (!bw)
1829 cpsw_fifo_shp_on(priv, fifo, bw);
1830
1831 val = slave_read(slave, tx_in_ctl_rg);
1832 if (cpsw_shp_is_off(priv)) {
1833		/* disable rate limiting on all FIFO queues */
1834 val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
1835
1836 /* set type of FIFO queues to normal priority mode */
1837 val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
1838
1839 /* set type of FIFO queues to be rate limited */
1840 if (bw)
1841 val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
1842 else
1843 priv->shp_cfg_speed = 0;
1844 }
1845
1846 /* toggle a FIFO rate limited queue */
1847 if (bw)
1848 val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1849 else
1850 val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1851 slave_write(slave, val, tx_in_ctl_rg);
1852
1853 /* FIFO transmit shape enable */
1854 cpsw_fifo_shp_on(priv, fifo, bw);
1855 return 0;
1856}
1857
1858/* Defaults:
1859 * class A - prio 3
1860 * class B - prio 2
1861 * shaping for class A should be set first
1862 */
1863static int cpsw_set_cbs(struct net_device *ndev,
1864 struct tc_cbs_qopt_offload *qopt)
1865{
1866 struct cpsw_priv *priv = netdev_priv(ndev);
1867 struct cpsw_common *cpsw = priv->cpsw;
1868 struct cpsw_slave *slave;
1869 int prev_speed = 0;
1870 int tc, ret, fifo;
1871 u32 bw = 0;
1872
1873 tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
1874
1875	/* Enable channels in backward order: the highest FIFOs must be rate
1876	 * limited first, for consistency with the CPDMA rate limited channels,
1877	 * which are also used in backward order. FIFO0 cannot be rate limited.
1878	 */
1879 fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
1880 if (!fifo) {
1881 dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
1882 return -EINVAL;
1883 }
1884
1885 /* do nothing, it's disabled anyway */
1886 if (!qopt->enable && !priv->fifo_bw[fifo])
1887 return 0;
1888
1889 /* shapers can be set if link speed is known */
1890 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1891 if (slave->phy && slave->phy->link) {
1892 if (priv->shp_cfg_speed &&
1893 priv->shp_cfg_speed != slave->phy->speed)
1894 prev_speed = priv->shp_cfg_speed;
1895
1896 priv->shp_cfg_speed = slave->phy->speed;
1897 }
1898
1899 if (!priv->shp_cfg_speed) {
1900 dev_err(priv->dev, "Link speed is not known");
1901 return -1;
1902 }
1903
1904 ret = pm_runtime_get_sync(cpsw->dev);
1905 if (ret < 0) {
1906 pm_runtime_put_noidle(cpsw->dev);
1907 return ret;
1908 }
1909
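	/* idleslope is handed over in kbit/s (cf. the Mbit/s * 1000 check in
	 * cpsw_set_fifo_bw()); a disabled class simply releases its FIFO
	 * bandwidth.
	 */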
1910 bw = qopt->enable ? qopt->idleslope : 0;
1911 ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
1912 if (ret) {
1913 priv->shp_cfg_speed = prev_speed;
1914 prev_speed = 0;
1915 }
1916
1917 if (bw && prev_speed)
1918 dev_warn(priv->dev,
1919 "Speed was changed, CBS shaper speeds are changed!");
1920
1921 pm_runtime_put_sync(cpsw->dev);
1922 return ret;
1923}
1924
Ivan Khoronzhuk4b4255e2018-07-24 00:26:33 +03001925static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1926{
1927 int fifo, bw;
1928
1929 for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
1930 bw = priv->fifo_bw[fifo];
1931 if (!bw)
1932 continue;
1933
1934 cpsw_set_fifo_rlimit(priv, fifo, bw);
1935 }
1936}
1937
1938static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1939{
1940 struct cpsw_common *cpsw = priv->cpsw;
1941 u32 tx_prio_map = 0;
1942 int i, tc, fifo;
1943 u32 tx_prio_rg;
1944
1945 if (!priv->mqprio_hw)
1946 return;
1947
1948 for (i = 0; i < 8; i++) {
1949 tc = netdev_get_prio_tc_map(priv->ndev, i);
1950 fifo = CPSW_FIFO_SHAPERS_NUM - tc;
1951 tx_prio_map |= fifo << (4 * i);
1952 }
1953
1954 tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
1955 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1956
1957 slave_write(slave, tx_prio_map, tx_prio_rg);
1958}
1959
Ivan Khoronzhuk00fe4712018-11-08 22:27:57 +02001960static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg)
1961{
1962 struct cpsw_priv *priv = arg;
1963
1964 if (!vdev)
1965 return 0;
1966
1967 cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid);
1968 return 0;
1969}
1970
Ivan Khoronzhuk4b4255e2018-07-24 00:26:33 +03001971/* restore resources after port reset */
1972static void cpsw_restore(struct cpsw_priv *priv)
1973{
Ivan Khoronzhuk00fe4712018-11-08 22:27:57 +02001974 /* restore vlan configurations */
1975 vlan_for_each(priv->ndev, cpsw_restore_vlans, priv);
1976
Ivan Khoronzhuk4b4255e2018-07-24 00:26:33 +03001977 /* restore MQPRIO offload */
1978 for_each_slave(priv, cpsw_mqprio_resume, priv);
1979
1980 /* restore CBS offload */
1981 for_each_slave(priv, cpsw_cbs_resume, priv);
1982}
1983
Mugunthan V Ndf828592012-03-18 20:17:54 +00001984static int cpsw_ndo_open(struct net_device *ndev)
1985{
1986 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03001987 struct cpsw_common *cpsw = priv->cpsw;
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001988 int ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00001989 u32 reg;
1990
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03001991 ret = pm_runtime_get_sync(cpsw->dev);
Grygorii Strashko108a6532016-06-24 21:23:42 +03001992 if (ret < 0) {
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03001993 pm_runtime_put_noidle(cpsw->dev);
Grygorii Strashko108a6532016-06-24 21:23:42 +03001994 return ret;
1995 }
Grygorii Strashko3fa88c52016-04-19 21:09:49 +03001996
Mugunthan V Ndf828592012-03-18 20:17:54 +00001997 netif_carrier_off(ndev);
1998
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001999 /* Notify the stack of the actual queue counts. */
2000 ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
2001 if (ret) {
2002 dev_err(priv->dev, "cannot set real number of tx queues\n");
2003 goto err_cleanup;
2004 }
2005
2006 ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
2007 if (ret) {
2008 dev_err(priv->dev, "cannot set real number of rx queues\n");
2009 goto err_cleanup;
2010 }
2011
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002012 reg = cpsw->version;
Mugunthan V Ndf828592012-03-18 20:17:54 +00002013
2014 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
2015 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
2016 CPSW_RTL_VERSION(reg));
2017
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02002018 /* Initialize host and slave ports */
2019 if (!cpsw->usage_count)
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00002020 cpsw_init_host_port(priv);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002021 for_each_slave(priv, cpsw_slave_open, priv);
2022
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002023 /* Add default VLAN */
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002024 if (!cpsw->data.dual_emac)
Mugunthan V Ne6afea02014-06-18 17:21:48 +05302025 cpsw_add_default_vlan(priv);
2026 else
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002027 cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
Grygorii Strashko61f1cef2016-04-07 15:16:43 +03002028 ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002029
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02002030 /* initialize shared resources for every ndev */
2031 if (!cpsw->usage_count) {
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00002032 /* disable priority elevation */
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06002033 writel_relaxed(0, &cpsw->regs->ptype);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002034
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00002035		/* enable statistics collection on all ports */
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06002036 writel_relaxed(0x7, &cpsw->regs->stat_port_en);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002037
Mugunthan V N1923d6e2014-09-08 22:54:02 +05302038 /* Enable internal fifo flow control */
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03002039 writel(0x7, &cpsw->regs->flow_control);
Mugunthan V N1923d6e2014-09-08 22:54:02 +05302040
Ivan Khoronzhukdbc4ec52016-08-10 02:22:43 +03002041 napi_enable(&cpsw->napi_rx);
2042 napi_enable(&cpsw->napi_tx);
Mugunthan V Nd354eb82015-08-04 16:06:19 +05302043
Ivan Khoronzhuke38b5a32016-08-10 02:22:41 +03002044 if (cpsw->tx_irq_disabled) {
2045 cpsw->tx_irq_disabled = false;
2046 enable_irq(cpsw->irqs_table[1]);
Mugunthan V N7da11602015-08-12 15:22:53 +05302047 }
2048
Ivan Khoronzhuke38b5a32016-08-10 02:22:41 +03002049 if (cpsw->rx_irq_disabled) {
2050 cpsw->rx_irq_disabled = false;
2051 enable_irq(cpsw->irqs_table[0]);
Mugunthan V N7da11602015-08-12 15:22:53 +05302052 }
2053
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03002054 ret = cpsw_fill_rx_channels(priv);
2055 if (ret < 0)
2056 goto err_cleanup;
Mugunthan V Nf280e892013-12-11 22:09:05 -06002057
Grygorii Strashko8a2c9a52016-12-06 18:00:41 -06002058 if (cpts_register(cpsw->cpts))
Mugunthan V Nf280e892013-12-11 22:09:05 -06002059 dev_err(priv->dev, "error registering cpts device\n");
2060
Mugunthan V Ndf828592012-03-18 20:17:54 +00002061 }
Mugunthan V Ndf828592012-03-18 20:17:54 +00002062
Ivan Khoronzhuk4b4255e2018-07-24 00:26:33 +03002063 cpsw_restore(priv);
2064
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00002065 /* Enable Interrupt pacing if configured */
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002066 if (cpsw->coal_intvl != 0) {
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00002067 struct ethtool_coalesce coal;
2068
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002069 coal.rx_coalesce_usecs = cpsw->coal_intvl;
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00002070 cpsw_set_coalesce(ndev, &coal);
2071 }
2072
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03002073 cpdma_ctlr_start(cpsw->dma);
2074 cpsw_intr_enable(cpsw);
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02002075 cpsw->usage_count++;
Mugunthan V Nf63a9752014-04-10 14:23:24 +05302076
Mugunthan V Ndf828592012-03-18 20:17:54 +00002077 return 0;
Mugunthan V Ndf828592012-03-18 20:17:54 +00002078
Sebastian Siewioraacebbf2013-04-23 07:31:36 +00002079err_cleanup:
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03002080 cpdma_ctlr_stop(cpsw->dma);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002081 for_each_slave(priv, cpsw_slave_stop, cpsw);
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002082 pm_runtime_put_sync(cpsw->dev);
Sebastian Siewioraacebbf2013-04-23 07:31:36 +00002083 netif_carrier_off(priv->ndev);
2084 return ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00002085}
2086
2087static int cpsw_ndo_stop(struct net_device *ndev)
2088{
2089 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03002090 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V Ndf828592012-03-18 20:17:54 +00002091
2092 cpsw_info(priv, ifdown, "shutting down cpsw device\n");
Ivan Khoronzhuk15180ec2018-11-08 22:27:56 +02002093 __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002094 netif_tx_stop_all_queues(priv->ndev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002095 netif_carrier_off(priv->ndev);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00002096
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02002097 if (cpsw->usage_count <= 1) {
Ivan Khoronzhukdbc4ec52016-08-10 02:22:43 +03002098 napi_disable(&cpsw->napi_rx);
2099 napi_disable(&cpsw->napi_tx);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002100 cpts_unregister(cpsw->cpts);
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03002101 cpsw_intr_disable(cpsw);
2102 cpdma_ctlr_stop(cpsw->dma);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002103 cpsw_ale_stop(cpsw->ale);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00002104 }
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002105 for_each_slave(priv, cpsw_slave_stop, cpsw);
Ivan Khoronzhuk0be01b82016-12-10 14:23:49 +02002106
2107 if (cpsw_need_resplit(cpsw))
Grygorii Strashko9763a892019-04-26 20:12:26 +03002108 cpsw_split_res(cpsw);
Ivan Khoronzhuk0be01b82016-12-10 14:23:49 +02002109
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02002110 cpsw->usage_count--;
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002111 pm_runtime_put_sync(cpsw->dev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002112 return 0;
2113}
2114
2115static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
2116 struct net_device *ndev)
2117{
2118 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03002119 struct cpsw_common *cpsw = priv->cpsw;
Ivan Khoronzhukf44f8412017-06-27 16:58:52 +03002120 struct cpts *cpts = cpsw->cpts;
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002121 struct netdev_queue *txq;
2122 struct cpdma_chan *txch;
2123 int ret, q_idx;
Mugunthan V Ndf828592012-03-18 20:17:54 +00002124
Mugunthan V Ndf828592012-03-18 20:17:54 +00002125 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
2126 cpsw_err(priv, tx_err, "packet pad failed\n");
Tobias Klauser8dc43dd2014-03-10 13:12:23 +01002127 ndev->stats.tx_dropped++;
Ivan Khoronzhuk1bf96052017-02-11 03:49:57 +02002128 return NET_XMIT_DROP;
Mugunthan V Ndf828592012-03-18 20:17:54 +00002129 }
2130
Mugunthan V N9232b162013-02-11 09:52:19 +00002131 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002132 priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb))
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002133 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2134
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002135 q_idx = skb_get_queue_mapping(skb);
2136 if (q_idx >= cpsw->tx_ch_num)
2137 q_idx = q_idx % cpsw->tx_ch_num;
2138
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02002139 txch = cpsw->txv[q_idx].ch;
Grygorii Strashko62f94c22018-02-06 19:17:06 -06002140 txq = netdev_get_tx_queue(ndev, q_idx);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002141 ret = cpsw_tx_packet_submit(priv, skb, txch);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002142 if (unlikely(ret != 0)) {
2143 cpsw_err(priv, tx_err, "desc submit failed\n");
2144 goto fail;
2145 }
2146
Mugunthan V Nfae50822013-01-17 06:31:34 +00002147	/* If there are no free tx descriptors left, we need to tell
2148	 * the kernel to stop sending us tx frames.
2149	 */
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002150 if (unlikely(!cpdma_check_free_tx_desc(txch))) {
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002151 netif_tx_stop_queue(txq);
Grygorii Strashko62f94c22018-02-06 19:17:06 -06002152
2153		/* Barrier, so that stop_queue is visible to other CPUs */
2154 smp_mb__after_atomic();
2155
2156 if (cpdma_check_free_tx_desc(txch))
2157 netif_tx_wake_queue(txq);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002158 }
Mugunthan V Nfae50822013-01-17 06:31:34 +00002159
Mugunthan V Ndf828592012-03-18 20:17:54 +00002160 return NETDEV_TX_OK;
2161fail:
Tobias Klauser8dc43dd2014-03-10 13:12:23 +01002162 ndev->stats.tx_dropped++;
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002163 netif_tx_stop_queue(txq);
Grygorii Strashko62f94c22018-02-06 19:17:06 -06002164
2165	/* Barrier, so that stop_queue is visible to other CPUs */
2166 smp_mb__after_atomic();
2167
2168 if (cpdma_check_free_tx_desc(txch))
2169 netif_tx_wake_queue(txq);
2170
Mugunthan V Ndf828592012-03-18 20:17:54 +00002171 return NETDEV_TX_BUSY;
2172}
2173
Grygorii Strashkoc8395d42016-12-06 18:00:34 -06002174#if IS_ENABLED(CONFIG_TI_CPTS)
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002175
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002176static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002177{
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002178 struct cpsw_common *cpsw = priv->cpsw;
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002179 struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002180 u32 ts_en, seq_id;
2181
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002182 if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002183 slave_write(slave, 0, CPSW1_TS_CTL);
2184 return;
2185 }
2186
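	/* seq_id: byte offset of the sequenceId field within the PTP header
	 * (30) plus the PTP Ethertype to match
	 */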
2187 seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2188 ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
2189
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002190 if (priv->tx_ts_enabled)
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002191 ts_en |= CPSW_V1_TS_TX_EN;
2192
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002193 if (priv->rx_ts_enabled)
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002194 ts_en |= CPSW_V1_TS_RX_EN;
2195
2196 slave_write(slave, ts_en, CPSW1_TS_CTL);
2197 slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
2198}
2199
2200static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
2201{
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00002202 struct cpsw_slave *slave;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03002203 struct cpsw_common *cpsw = priv->cpsw;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002204 u32 ctrl, mtype;
2205
Ivan Khoronzhukcb7d78d02016-12-10 14:23:46 +02002206 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00002207
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002208 ctrl = slave_read(slave, CPSW2_CONTROL);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002209 switch (cpsw->version) {
George Cherian09c55372014-05-02 12:02:02 +05302210 case CPSW_VERSION_2:
2211 ctrl &= ~CTRL_V2_ALL_TS_MASK;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002212
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002213 if (priv->tx_ts_enabled)
George Cherian09c55372014-05-02 12:02:02 +05302214 ctrl |= CTRL_V2_TX_TS_BITS;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002215
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002216 if (priv->rx_ts_enabled)
George Cherian09c55372014-05-02 12:02:02 +05302217 ctrl |= CTRL_V2_RX_TS_BITS;
Richard Cochran26fe7eb2015-05-25 11:02:13 +02002218 break;
George Cherian09c55372014-05-02 12:02:02 +05302219 case CPSW_VERSION_3:
2220 default:
2221 ctrl &= ~CTRL_V3_ALL_TS_MASK;
2222
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002223 if (priv->tx_ts_enabled)
George Cherian09c55372014-05-02 12:02:02 +05302224 ctrl |= CTRL_V3_TX_TS_BITS;
2225
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002226 if (priv->rx_ts_enabled)
George Cherian09c55372014-05-02 12:02:02 +05302227 ctrl |= CTRL_V3_RX_TS_BITS;
Richard Cochran26fe7eb2015-05-25 11:02:13 +02002228 break;
George Cherian09c55372014-05-02 12:02:02 +05302229 }
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002230
2231 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
2232
2233 slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
2234 slave_write(slave, ctrl, CPSW2_CONTROL);
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06002235 writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
Ivan Khoronzhuk1ebb2442018-11-12 16:00:23 +02002236 writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002237}
2238
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002239static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002240{
Mugunthan V N3177bf62012-11-27 07:53:40 +00002241 struct cpsw_priv *priv = netdev_priv(dev);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002242 struct hwtstamp_config cfg;
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002243 struct cpsw_common *cpsw = priv->cpsw;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002244
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002245 if (cpsw->version != CPSW_VERSION_1 &&
2246 cpsw->version != CPSW_VERSION_2 &&
2247 cpsw->version != CPSW_VERSION_3)
Ben Hutchings2ee91e52013-11-14 00:47:36 +00002248 return -EOPNOTSUPP;
2249
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002250 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2251 return -EFAULT;
2252
2253 /* reserved for future extensions */
2254 if (cfg.flags)
2255 return -EINVAL;
2256
Ben Hutchings2ee91e52013-11-14 00:47:36 +00002257 if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002258 return -ERANGE;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002259
2260 switch (cfg.rx_filter) {
2261 case HWTSTAMP_FILTER_NONE:
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002262 priv->rx_ts_enabled = 0;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002263 break;
2264 case HWTSTAMP_FILTER_ALL:
Grygorii Strashkoe9523a52017-06-08 13:51:31 -05002265 case HWTSTAMP_FILTER_NTP_ALL:
2266 return -ERANGE;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002267 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2268 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2269 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002270 priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
Grygorii Strashkoe9523a52017-06-08 13:51:31 -05002271 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2272 break;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002273 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2274 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2275 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2276 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2277 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2278 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2279 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2280 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2281 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002282 priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002283 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2284 break;
2285 default:
2286 return -ERANGE;
2287 }
2288
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002289 priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;
Ben Hutchings2ee91e52013-11-14 00:47:36 +00002290
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002291 switch (cpsw->version) {
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002292 case CPSW_VERSION_1:
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002293 cpsw_hwtstamp_v1(priv);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002294 break;
2295 case CPSW_VERSION_2:
George Cherianf7d403c2014-05-02 12:02:01 +05302296 case CPSW_VERSION_3:
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002297 cpsw_hwtstamp_v2(priv);
2298 break;
2299 default:
Ben Hutchings2ee91e52013-11-14 00:47:36 +00002300 WARN_ON(1);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002301 }
2302
2303 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2304}
2305
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002306static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2307{
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002308 struct cpsw_common *cpsw = ndev_to_cpsw(dev);
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002309 struct cpsw_priv *priv = netdev_priv(dev);
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002310 struct hwtstamp_config cfg;
2311
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002312 if (cpsw->version != CPSW_VERSION_1 &&
2313 cpsw->version != CPSW_VERSION_2 &&
2314 cpsw->version != CPSW_VERSION_3)
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002315 return -EOPNOTSUPP;
2316
2317 cfg.flags = 0;
Ivan Khoronzhuka9423122018-11-12 16:00:22 +02002318 cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2319 cfg.rx_filter = priv->rx_ts_enabled;
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002320
2321 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2322}
Grygorii Strashkoc8395d42016-12-06 18:00:34 -06002323#else
2324static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2325{
2326 return -EOPNOTSUPP;
2327}
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002328
Grygorii Strashkoc8395d42016-12-06 18:00:34 -06002329static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2330{
2331 return -EOPNOTSUPP;
2332}
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002333#endif /*CONFIG_TI_CPTS*/
2334
2335static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2336{
Mugunthan V N11f2c982013-03-11 23:16:38 +00002337 struct cpsw_priv *priv = netdev_priv(dev);
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002338 struct cpsw_common *cpsw = priv->cpsw;
2339 int slave_no = cpsw_slave_index(cpsw, priv);
Mugunthan V N11f2c982013-03-11 23:16:38 +00002340
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002341 if (!netif_running(dev))
2342 return -EINVAL;
2343
Mugunthan V N11f2c982013-03-11 23:16:38 +00002344 switch (cmd) {
Mugunthan V N11f2c982013-03-11 23:16:38 +00002345 case SIOCSHWTSTAMP:
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002346 return cpsw_hwtstamp_set(dev, req);
2347 case SIOCGHWTSTAMP:
2348 return cpsw_hwtstamp_get(dev, req);
Mugunthan V N11f2c982013-03-11 23:16:38 +00002349 }
2350
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002351 if (!cpsw->slaves[slave_no].phy)
Stefan Sørensenc1b59942014-02-16 14:54:25 +01002352 return -EOPNOTSUPP;
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002353 return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002354}
2355
Mugunthan V Ndf828592012-03-18 20:17:54 +00002356static void cpsw_ndo_tx_timeout(struct net_device *ndev)
2357{
2358 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03002359 struct cpsw_common *cpsw = priv->cpsw;
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002360 int ch;
Mugunthan V Ndf828592012-03-18 20:17:54 +00002361
2362 cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
Tobias Klauser8dc43dd2014-03-10 13:12:23 +01002363 ndev->stats.tx_errors++;
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03002364 cpsw_intr_disable(cpsw);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002365 for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02002366 cpdma_chan_stop(cpsw->txv[ch].ch);
2367 cpdma_chan_start(cpsw->txv[ch].ch);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002368 }
2369
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03002370 cpsw_intr_enable(cpsw);
Grygorii Strashko75514b62017-03-31 18:41:23 -05002371 netif_trans_update(ndev);
2372 netif_tx_wake_all_queues(ndev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002373}
2374
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302375static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
2376{
2377 struct cpsw_priv *priv = netdev_priv(ndev);
2378 struct sockaddr *addr = (struct sockaddr *)p;
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03002379 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302380 int flags = 0;
2381 u16 vid = 0;
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002382 int ret;
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302383
2384 if (!is_valid_ether_addr(addr->sa_data))
2385 return -EADDRNOTAVAIL;
2386
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002387 ret = pm_runtime_get_sync(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002388 if (ret < 0) {
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002389 pm_runtime_put_noidle(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002390 return ret;
2391 }
2392
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002393 if (cpsw->data.dual_emac) {
2394 vid = cpsw->slaves[priv->emac_port].port_vlan;
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302395 flags = ALE_VLAN;
2396 }
2397
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002398 cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302399 flags, vid);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002400 cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302401 flags, vid);
2402
2403 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
2404 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2405 for_each_slave(priv, cpsw_set_slave_mac, priv);
2406
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002407 pm_runtime_put(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002408
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302409 return 0;
2410}
2411
Mugunthan V Ndf828592012-03-18 20:17:54 +00002412#ifdef CONFIG_NET_POLL_CONTROLLER
2413static void cpsw_ndo_poll_controller(struct net_device *ndev)
2414{
Ivan Khoronzhukdbc4ec52016-08-10 02:22:43 +03002415 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002416
Ivan Khoronzhukdbc4ec52016-08-10 02:22:43 +03002417 cpsw_intr_disable(cpsw);
2418 cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
2419 cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
2420 cpsw_intr_enable(cpsw);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002421}
2422#endif
2423
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002424static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
2425 unsigned short vid)
2426{
2427 int ret;
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302428 int unreg_mcast_mask = 0;
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03002429 int mcast_mask;
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302430 u32 port_mask;
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002431 struct cpsw_common *cpsw = priv->cpsw;
Lennart Sorensen1e5c4bc2014-10-31 13:38:52 -04002432
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002433 if (cpsw->data.dual_emac) {
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302434 port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002435
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03002436 mcast_mask = ALE_PORT_HOST;
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302437 if (priv->ndev->flags & IFF_ALLMULTI)
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03002438 unreg_mcast_mask = mcast_mask;
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302439 } else {
2440 port_mask = ALE_ALL_PORTS;
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03002441 mcast_mask = port_mask;
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302442
2443 if (priv->ndev->flags & IFF_ALLMULTI)
2444 unreg_mcast_mask = ALE_ALL_PORTS;
2445 else
2446 unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
2447 }
2448
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002449 ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
Grygorii Strashko61f1cef2016-04-07 15:16:43 +03002450 unreg_mcast_mask);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002451 if (ret != 0)
2452 return ret;
2453
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002454 ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
Grygorii Strashko71a2cbb2016-04-07 15:16:44 +03002455 HOST_PORT_NUM, ALE_VLAN, vid);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002456 if (ret != 0)
2457 goto clean_vid;
2458
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002459 ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03002460 mcast_mask, ALE_VLAN, vid, 0);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002461 if (ret != 0)
2462 goto clean_vlan_ucast;
2463 return 0;
2464
2465clean_vlan_ucast:
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002466 cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
Grygorii Strashko71a2cbb2016-04-07 15:16:44 +03002467 HOST_PORT_NUM, ALE_VLAN, vid);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002468clean_vid:
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002469 cpsw_ale_del_vlan(cpsw->ale, vid, 0);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002470 return ret;
2471}
2472
2473static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
Patrick McHardy80d5c362013-04-19 02:04:28 +00002474 __be16 proto, u16 vid)
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002475{
2476 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03002477 struct cpsw_common *cpsw = priv->cpsw;
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002478 int ret;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002479
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002480 if (vid == cpsw->data.default_vlan)
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002481 return 0;
2482
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002483 ret = pm_runtime_get_sync(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002484 if (ret < 0) {
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002485 pm_runtime_put_noidle(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002486 return ret;
2487 }
2488
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002489 if (cpsw->data.dual_emac) {
Mugunthan V N02a54162015-01-22 15:19:22 +05302490 /* In dual EMAC, reserved VLAN id should not be used for
2491 * creating VLAN interfaces as this can break the dual
2492 * EMAC port separation
2493 */
2494 int i;
2495
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002496 for (i = 0; i < cpsw->data.slaves; i++) {
Ivan Khoronzhuk803c4f62018-08-10 15:47:09 +03002497 if (vid == cpsw->slaves[i].port_vlan) {
2498 ret = -EINVAL;
2499 goto err;
2500 }
Mugunthan V N02a54162015-01-22 15:19:22 +05302501 }
2502 }
2503
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002504 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002505 ret = cpsw_add_vlan_ale_entry(priv, vid);
Ivan Khoronzhuk803c4f62018-08-10 15:47:09 +03002506err:
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002507 pm_runtime_put(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002508 return ret;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002509}
2510
2511static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
Patrick McHardy80d5c362013-04-19 02:04:28 +00002512 __be16 proto, u16 vid)
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002513{
2514 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03002515 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002516 int ret;
2517
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002518 if (vid == cpsw->data.default_vlan)
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002519 return 0;
2520
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002521 ret = pm_runtime_get_sync(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002522 if (ret < 0) {
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002523 pm_runtime_put_noidle(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002524 return ret;
2525 }
2526
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002527 if (cpsw->data.dual_emac) {
Mugunthan V N02a54162015-01-22 15:19:22 +05302528 int i;
2529
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002530 for (i = 0; i < cpsw->data.slaves; i++) {
2531 if (vid == cpsw->slaves[i].port_vlan)
Ivan Khoronzhuk803c4f62018-08-10 15:47:09 +03002532 goto err;
Mugunthan V N02a54162015-01-22 15:19:22 +05302533 }
2534 }
2535
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002536 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002537 ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
Ivan Khoronzhukbe35b982018-08-10 15:47:08 +03002538 ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
2539 HOST_PORT_NUM, ALE_VLAN, vid);
2540 ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
2541 0, ALE_VLAN, vid);
Ivan Khoronzhuk15180ec2018-11-08 22:27:56 +02002542 ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid);
Ivan Khoronzhuk803c4f62018-08-10 15:47:09 +03002543err:
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002544 pm_runtime_put(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002545 return ret;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002546}
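
/* Each VLAN added via .ndo_vlan_rx_add_vid ends up as three ALE entries,
 * created by cpsw_add_vlan_ale_entry() above: the VLAN itself (with its
 * member, registered and unregistered multicast port masks), a unicast
 * entry for the interface MAC on the host port, and a multicast entry for
 * the broadcast address. .ndo_vlan_rx_kill_vid removes the same three
 * entries and flushes multicast addresses learned on that VID. In dual
 * EMAC mode the per-port reserved VLANs are rejected on add and skipped
 * on delete so the port separation is preserved.
 */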
2547
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02002548static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
2549{
2550 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02002551 struct cpsw_common *cpsw = priv->cpsw;
Ivan Khoronzhuk52986a22016-12-10 14:23:50 +02002552 struct cpsw_slave *slave;
Ivan Khoronzhuk32b78d82016-12-10 14:23:48 +02002553 u32 min_rate;
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02002554 u32 ch_rate;
Ivan Khoronzhuk52986a22016-12-10 14:23:50 +02002555 int i, ret;
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02002556
2557 ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
2558 if (ch_rate == rate)
2559 return 0;
2560
Ivan Khoronzhuk32b78d82016-12-10 14:23:48 +02002561 ch_rate = rate * 1000;
2562 min_rate = cpdma_chan_get_min_rate(cpsw->dma);
2563 if ((ch_rate < min_rate && ch_rate)) {
2564 		dev_err(priv->dev, "The channel rate cannot be less than %dKbps",
2565 min_rate);
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02002566 return -EINVAL;
2567 }
2568
Ivan Khoronzhuk0be01b82016-12-10 14:23:49 +02002569 if (rate > cpsw->speed) {
Ivan Khoronzhuk32b78d82016-12-10 14:23:48 +02002570 dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02002571 return -EINVAL;
2572 }
2573
2574 ret = pm_runtime_get_sync(cpsw->dev);
2575 if (ret < 0) {
2576 pm_runtime_put_noidle(cpsw->dev);
2577 return ret;
2578 }
2579
Ivan Khoronzhuk32b78d82016-12-10 14:23:48 +02002580 ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02002581 pm_runtime_put(cpsw->dev);
Ivan Khoronzhuk32b78d82016-12-10 14:23:48 +02002582
2583 if (ret)
2584 return ret;
2585
Ivan Khoronzhuk52986a22016-12-10 14:23:50 +02002586	/* update rates for slaves' tx queues */
2587 for (i = 0; i < cpsw->data.slaves; i++) {
2588 slave = &cpsw->slaves[i];
2589 if (!slave->ndev)
2590 continue;
2591
2592 netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
2593 }
2594
Grygorii Strashko9763a892019-04-26 20:12:26 +03002595 cpsw_split_res(cpsw);
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02002596 return ret;
2597}
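
/* cpsw_ndo_set_tx_maxrate() above receives a per-queue rate in Mbps from
 * the core, converts it to Kbps for the CPDMA rate limiter and, since the
 * dual EMAC interfaces share the same tx channels, mirrors the value into
 * the matching tx queue of every slave netdev before re-splitting the
 * budget with cpsw_split_res(). For illustration only (standard networking
 * sysfs, not something defined by this driver), a queue is typically
 * capped from user space with:
 *
 *   echo 100 > /sys/class/net/eth0/queues/tx-0/tx_maxrate    # 100 Mbps
 */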
2598
Ivan Khoronzhuk7929a662018-07-24 00:26:31 +03002599static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
2600{
2601 struct tc_mqprio_qopt_offload *mqprio = type_data;
2602 struct cpsw_priv *priv = netdev_priv(ndev);
2603 struct cpsw_common *cpsw = priv->cpsw;
2604 int fifo, num_tc, count, offset;
2605 struct cpsw_slave *slave;
2606 u32 tx_prio_map = 0;
2607 int i, tc, ret;
2608
2609 num_tc = mqprio->qopt.num_tc;
2610 if (num_tc > CPSW_TC_NUM)
2611 return -EINVAL;
2612
2613 if (mqprio->mode != TC_MQPRIO_MODE_DCB)
2614 return -EINVAL;
2615
2616 ret = pm_runtime_get_sync(cpsw->dev);
2617 if (ret < 0) {
2618 pm_runtime_put_noidle(cpsw->dev);
2619 return ret;
2620 }
2621
2622 if (num_tc) {
2623 for (i = 0; i < 8; i++) {
2624 tc = mqprio->qopt.prio_tc_map[i];
2625 fifo = cpsw_tc_to_fifo(tc, num_tc);
2626 tx_prio_map |= fifo << (4 * i);
2627 }
2628
2629 netdev_set_num_tc(ndev, num_tc);
2630 for (i = 0; i < num_tc; i++) {
2631 count = mqprio->qopt.count[i];
2632 offset = mqprio->qopt.offset[i];
2633 netdev_set_tc_queue(ndev, i, count, offset);
2634 }
2635 }
2636
2637 if (!mqprio->qopt.hw) {
2638 /* restore default configuration */
2639 netdev_reset_tc(ndev);
2640 tx_prio_map = TX_PRIORITY_MAPPING;
2641 }
2642
2643 priv->mqprio_hw = mqprio->qopt.hw;
2644
2645 offset = cpsw->version == CPSW_VERSION_1 ?
2646 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
2647
2648 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
2649 slave_write(slave, tx_prio_map, offset);
2650
2651 pm_runtime_put_sync(cpsw->dev);
2652
2653 return 0;
2654}
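
/* cpsw_set_mqprio() above packs one 4-bit fifo number per skb priority
 * (priorities 0-7) into the per-slave TX_PRI_MAP register, using
 * cpsw_tc_to_fifo() to turn a traffic class into a hardware fifo; when the
 * offload is removed (qopt.hw == 0) the default TX_PRIORITY_MAPPING is
 * restored and the netdev tc state is reset. Purely as an illustration
 * (the class count, map and queue layout below are made up), the offload
 * is normally driven with the mqprio qdisc:
 *
 *   tc qdisc replace dev eth0 root handle 100: mqprio \
 *           num_tc 3 map 2 2 1 0 0 0 0 0 queues 1@0 1@1 2@2 hw 1
 */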
2655
2656static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
2657 void *type_data)
2658{
2659 switch (type) {
Ivan Khoronzhuk57d90142018-07-24 00:26:32 +03002660 case TC_SETUP_QDISC_CBS:
2661 return cpsw_set_cbs(ndev, type_data);
2662
Ivan Khoronzhuk7929a662018-07-24 00:26:31 +03002663 case TC_SETUP_QDISC_MQPRIO:
2664 return cpsw_set_mqprio(ndev, type_data);
2665
2666 default:
2667 return -EOPNOTSUPP;
2668 }
2669}
2670
Mugunthan V Ndf828592012-03-18 20:17:54 +00002671static const struct net_device_ops cpsw_netdev_ops = {
2672 .ndo_open = cpsw_ndo_open,
2673 .ndo_stop = cpsw_ndo_stop,
2674 .ndo_start_xmit = cpsw_ndo_start_xmit,
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302675 .ndo_set_mac_address = cpsw_ndo_set_mac_address,
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002676 .ndo_do_ioctl = cpsw_ndo_ioctl,
Mugunthan V Ndf828592012-03-18 20:17:54 +00002677 .ndo_validate_addr = eth_validate_addr,
Mugunthan V Ndf828592012-03-18 20:17:54 +00002678 .ndo_tx_timeout = cpsw_ndo_tx_timeout,
Mugunthan V N5c50a852012-10-29 08:45:11 +00002679 .ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02002680 .ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
Mugunthan V Ndf828592012-03-18 20:17:54 +00002681#ifdef CONFIG_NET_POLL_CONTROLLER
2682 .ndo_poll_controller = cpsw_ndo_poll_controller,
2683#endif
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002684 .ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
2685 .ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
Ivan Khoronzhuk7929a662018-07-24 00:26:31 +03002686 .ndo_setup_tc = cpsw_ndo_setup_tc,
Mugunthan V Ndf828592012-03-18 20:17:54 +00002687};
2688
Mugunthan V N52c4f0e2014-07-22 23:25:07 +05302689static int cpsw_get_regs_len(struct net_device *ndev)
2690{
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002691 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Mugunthan V N52c4f0e2014-07-22 23:25:07 +05302692
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002693 return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
Mugunthan V N52c4f0e2014-07-22 23:25:07 +05302694}
2695
2696static void cpsw_get_regs(struct net_device *ndev,
2697 struct ethtool_regs *regs, void *p)
2698{
Mugunthan V N52c4f0e2014-07-22 23:25:07 +05302699 u32 *reg = p;
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002700 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Mugunthan V N52c4f0e2014-07-22 23:25:07 +05302701
2702 /* update CPSW IP version */
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002703 regs->version = cpsw->version;
Mugunthan V N52c4f0e2014-07-22 23:25:07 +05302704
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002705 cpsw_ale_dump(cpsw->ale, reg);
Mugunthan V N52c4f0e2014-07-22 23:25:07 +05302706}
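
/* The ethtool register dump of this driver is not the MMIO register file
 * but a snapshot of the ALE table: cpsw_get_regs_len() sizes the buffer as
 * ale_entries * ALE_ENTRY_WORDS 32-bit words, cpsw_ale_dump() fills it and
 * regs->version carries the CPSW IP version (retrievable with "ethtool -d",
 * for example).
 */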
2707
Mugunthan V Ndf828592012-03-18 20:17:54 +00002708static void cpsw_get_drvinfo(struct net_device *ndev,
2709 struct ethtool_drvinfo *info)
2710{
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03002711 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002712 struct platform_device *pdev = to_platform_device(cpsw->dev);
Jiri Pirko7826d432013-01-06 00:44:26 +00002713
Mugunthan V N52c4f0e2014-07-22 23:25:07 +05302714 strlcpy(info->driver, "cpsw", sizeof(info->driver));
Jiri Pirko7826d432013-01-06 00:44:26 +00002715 strlcpy(info->version, "1.0", sizeof(info->version));
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002716 strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
Mugunthan V Ndf828592012-03-18 20:17:54 +00002717}
2718
2719static u32 cpsw_get_msglevel(struct net_device *ndev)
2720{
2721 struct cpsw_priv *priv = netdev_priv(ndev);
2722 return priv->msg_enable;
2723}
2724
2725static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
2726{
2727 struct cpsw_priv *priv = netdev_priv(ndev);
2728 priv->msg_enable = value;
2729}
2730
Grygorii Strashkoc8395d42016-12-06 18:00:34 -06002731#if IS_ENABLED(CONFIG_TI_CPTS)
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002732static int cpsw_get_ts_info(struct net_device *ndev,
2733 struct ethtool_ts_info *info)
2734{
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002735 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002736
2737 info->so_timestamping =
2738 SOF_TIMESTAMPING_TX_HARDWARE |
2739 SOF_TIMESTAMPING_TX_SOFTWARE |
2740 SOF_TIMESTAMPING_RX_HARDWARE |
2741 SOF_TIMESTAMPING_RX_SOFTWARE |
2742 SOF_TIMESTAMPING_SOFTWARE |
2743 SOF_TIMESTAMPING_RAW_HARDWARE;
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002744 info->phc_index = cpsw->cpts->phc_index;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002745 info->tx_types =
2746 (1 << HWTSTAMP_TX_OFF) |
2747 (1 << HWTSTAMP_TX_ON);
2748 info->rx_filters =
2749 (1 << HWTSTAMP_FILTER_NONE) |
Grygorii Strashkoe9523a52017-06-08 13:51:31 -05002750 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002751 (1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
Grygorii Strashkoc8395d42016-12-06 18:00:34 -06002752 return 0;
2753}
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002754#else
Grygorii Strashkoc8395d42016-12-06 18:00:34 -06002755static int cpsw_get_ts_info(struct net_device *ndev,
2756 struct ethtool_ts_info *info)
2757{
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002758 info->so_timestamping =
2759 SOF_TIMESTAMPING_TX_SOFTWARE |
2760 SOF_TIMESTAMPING_RX_SOFTWARE |
2761 SOF_TIMESTAMPING_SOFTWARE;
2762 info->phc_index = -1;
2763 info->tx_types = 0;
2764 info->rx_filters = 0;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002765 return 0;
2766}
Grygorii Strashkoc8395d42016-12-06 18:00:34 -06002767#endif
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002768
Philippe Reynes24798762016-10-08 17:46:15 +02002769static int cpsw_get_link_ksettings(struct net_device *ndev,
2770 struct ethtool_link_ksettings *ecmd)
Mugunthan V Nd3bb9c52013-03-11 23:16:36 +00002771{
2772 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002773 struct cpsw_common *cpsw = priv->cpsw;
2774 int slave_no = cpsw_slave_index(cpsw, priv);
Mugunthan V Nd3bb9c52013-03-11 23:16:36 +00002775
yuval.shaia@oracle.com55141742017-06-13 10:09:46 +03002776 if (!cpsw->slaves[slave_no].phy)
Mugunthan V Nd3bb9c52013-03-11 23:16:36 +00002777 return -EOPNOTSUPP;
yuval.shaia@oracle.com55141742017-06-13 10:09:46 +03002778
2779 phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
2780 return 0;
Mugunthan V Nd3bb9c52013-03-11 23:16:36 +00002781}
2782
Philippe Reynes24798762016-10-08 17:46:15 +02002783static int cpsw_set_link_ksettings(struct net_device *ndev,
2784 const struct ethtool_link_ksettings *ecmd)
Mugunthan V Nd3bb9c52013-03-11 23:16:36 +00002785{
2786 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002787 struct cpsw_common *cpsw = priv->cpsw;
2788 int slave_no = cpsw_slave_index(cpsw, priv);
Mugunthan V Nd3bb9c52013-03-11 23:16:36 +00002789
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002790 if (cpsw->slaves[slave_no].phy)
Philippe Reynes24798762016-10-08 17:46:15 +02002791 return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
2792 ecmd);
Mugunthan V Nd3bb9c52013-03-11 23:16:36 +00002793 else
2794 return -EOPNOTSUPP;
2795}
2796
Matus Ujhelyid8a64422013-08-20 07:59:38 +02002797static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2798{
2799 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002800 struct cpsw_common *cpsw = priv->cpsw;
2801 int slave_no = cpsw_slave_index(cpsw, priv);
Matus Ujhelyid8a64422013-08-20 07:59:38 +02002802
2803 wol->supported = 0;
2804 wol->wolopts = 0;
2805
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002806 if (cpsw->slaves[slave_no].phy)
2807 phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
Matus Ujhelyid8a64422013-08-20 07:59:38 +02002808}
2809
2810static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
2811{
2812 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002813 struct cpsw_common *cpsw = priv->cpsw;
2814 int slave_no = cpsw_slave_index(cpsw, priv);
Matus Ujhelyid8a64422013-08-20 07:59:38 +02002815
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002816 if (cpsw->slaves[slave_no].phy)
2817 return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
Matus Ujhelyid8a64422013-08-20 07:59:38 +02002818 else
2819 return -EOPNOTSUPP;
2820}
2821
Mugunthan V N1923d6e2014-09-08 22:54:02 +05302822static void cpsw_get_pauseparam(struct net_device *ndev,
2823 struct ethtool_pauseparam *pause)
2824{
2825 struct cpsw_priv *priv = netdev_priv(ndev);
2826
2827 pause->autoneg = AUTONEG_DISABLE;
2828 pause->rx_pause = priv->rx_pause ? true : false;
2829 pause->tx_pause = priv->tx_pause ? true : false;
2830}
2831
2832static int cpsw_set_pauseparam(struct net_device *ndev,
2833 struct ethtool_pauseparam *pause)
2834{
2835 struct cpsw_priv *priv = netdev_priv(ndev);
2836 bool link;
2837
2838 priv->rx_pause = pause->rx_pause ? true : false;
2839 priv->tx_pause = pause->tx_pause ? true : false;
2840
2841 for_each_slave(priv, _cpsw_adjust_link, priv, &link);
Mugunthan V N1923d6e2014-09-08 22:54:02 +05302842 return 0;
2843}
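
/* Flow control is forced rather than autonegotiated here: the requested
 * rx/tx pause flags are cached in cpsw_priv and pushed to the hardware by
 * re-running _cpsw_adjust_link() on every slave, and cpsw_get_pauseparam()
 * always reports autoneg as disabled.
 */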
2844
Grygorii Strashko7898b1d2016-06-24 21:23:44 +03002845static int cpsw_ethtool_op_begin(struct net_device *ndev)
2846{
2847 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03002848 struct cpsw_common *cpsw = priv->cpsw;
Grygorii Strashko7898b1d2016-06-24 21:23:44 +03002849 int ret;
2850
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002851 ret = pm_runtime_get_sync(cpsw->dev);
Grygorii Strashko7898b1d2016-06-24 21:23:44 +03002852 if (ret < 0) {
2853 cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002854 pm_runtime_put_noidle(cpsw->dev);
Grygorii Strashko7898b1d2016-06-24 21:23:44 +03002855 }
2856
2857 return ret;
2858}
2859
2860static void cpsw_ethtool_op_complete(struct net_device *ndev)
2861{
2862 struct cpsw_priv *priv = netdev_priv(ndev);
2863 int ret;
2864
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002865 ret = pm_runtime_put(priv->cpsw->dev);
Grygorii Strashko7898b1d2016-06-24 21:23:44 +03002866 if (ret < 0)
2867 cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
2868}
2869
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002870static void cpsw_get_channels(struct net_device *ndev,
2871 struct ethtool_channels *ch)
2872{
2873 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
2874
Ivan Khoronzhuk9611d6d2018-05-17 01:21:45 +03002875 ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
2876 ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002877 ch->max_combined = 0;
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002878 ch->max_other = 0;
2879 ch->other_count = 0;
2880 ch->rx_count = cpsw->rx_ch_num;
2881 ch->tx_count = cpsw->tx_ch_num;
2882 ch->combined_count = 0;
2883}
2884
2885static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
2886 struct ethtool_channels *ch)
2887{
Ivan Khoronzhuk9611d6d2018-05-17 01:21:45 +03002888 if (cpsw->quirk_irq) {
2889 dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
2890 return -EOPNOTSUPP;
2891 }
2892
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002893 if (ch->combined_count)
2894 return -EINVAL;
2895
2896 /* verify we have at least one channel in each direction */
2897 if (!ch->rx_count || !ch->tx_count)
2898 return -EINVAL;
2899
2900 if (ch->rx_count > cpsw->data.channels ||
2901 ch->tx_count > cpsw->data.channels)
2902 return -EINVAL;
2903
2904 return 0;
2905}
2906
2907static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
2908{
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002909 struct cpsw_common *cpsw = priv->cpsw;
2910 void (*handler)(void *, int, int);
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02002911 struct netdev_queue *queue;
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02002912 struct cpsw_vector *vec;
Ivan Khoronzhuk79b33252018-07-24 00:26:29 +03002913 int ret, *ch, vch;
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002914
2915 if (rx) {
2916 ch = &cpsw->rx_ch_num;
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02002917 vec = cpsw->rxv;
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002918 handler = cpsw_rx_handler;
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002919 } else {
2920 ch = &cpsw->tx_ch_num;
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02002921 vec = cpsw->txv;
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002922 handler = cpsw_tx_handler;
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002923 }
2924
2925 while (*ch < ch_num) {
Ivan Khoronzhuk79b33252018-07-24 00:26:29 +03002926 vch = rx ? *ch : 7 - *ch;
2927 vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02002928 queue = netdev_get_tx_queue(priv->ndev, *ch);
2929 queue->tx_maxrate = 0;
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002930
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02002931 if (IS_ERR(vec[*ch].ch))
2932 return PTR_ERR(vec[*ch].ch);
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002933
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02002934 if (!vec[*ch].ch)
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002935 return -EINVAL;
2936
2937 cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
2938 (rx ? "rx" : "tx"));
2939 (*ch)++;
2940 }
2941
2942 while (*ch > ch_num) {
2943 (*ch)--;
2944
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02002945 ret = cpdma_chan_destroy(vec[*ch].ch);
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002946 if (ret)
2947 return ret;
2948
2949 cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
2950 (rx ? "rx" : "tx"));
2951 }
2952
2953 return 0;
2954}
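
/* CPDMA provides eight channels per direction. When channels are added
 * above, rx channels take hardware channel numbers counting up from 0
 * while tx channels count down from 7 (vch = 7 - *ch); shrinking destroys
 * the most recently created channel first. A newly created tx queue starts
 * with tx_maxrate reset to 0, i.e. unshaped.
 */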
2955
2956static int cpsw_update_channels(struct cpsw_priv *priv,
2957 struct ethtool_channels *ch)
2958{
2959 int ret;
2960
2961 ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
2962 if (ret)
2963 return ret;
2964
2965 ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
2966 if (ret)
2967 return ret;
2968
2969 return 0;
2970}
2971
Ivan Khoronzhuk022d7ad2017-01-19 18:58:27 +02002972static void cpsw_suspend_data_pass(struct net_device *ndev)
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002973{
Ivan Khoronzhuk022d7ad2017-01-19 18:58:27 +02002974 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002975 struct cpsw_slave *slave;
Ivan Khoronzhuk022d7ad2017-01-19 18:58:27 +02002976 int i;
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03002977
2978 /* Disable NAPI scheduling */
2979 cpsw_intr_disable(cpsw);
2980
2981 /* Stop all transmit queues for every network device.
2982 * Disable re-using rx descriptors with dormant_on.
2983 */
2984 for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
2985 if (!(slave->ndev && netif_running(slave->ndev)))
2986 continue;
2987
2988 netif_tx_stop_all_queues(slave->ndev);
2989 netif_dormant_on(slave->ndev);
2990 }
2991
2992 /* Handle rest of tx packets and stop cpdma channels */
2993 cpdma_ctlr_stop(cpsw->dma);
Ivan Khoronzhuk022d7ad2017-01-19 18:58:27 +02002994}
2995
2996static int cpsw_resume_data_pass(struct net_device *ndev)
2997{
2998 struct cpsw_priv *priv = netdev_priv(ndev);
2999 struct cpsw_common *cpsw = priv->cpsw;
3000 struct cpsw_slave *slave;
3001 int i, ret;
3002
3003 /* Allow rx packets handling */
3004 for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
3005 if (slave->ndev && netif_running(slave->ndev))
3006 netif_dormant_off(slave->ndev);
3007
3008	/* After this point, receive is started */
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02003009 if (cpsw->usage_count) {
Ivan Khoronzhuk022d7ad2017-01-19 18:58:27 +02003010 ret = cpsw_fill_rx_channels(priv);
3011 if (ret)
3012 return ret;
3013
3014 cpdma_ctlr_start(cpsw->dma);
3015 cpsw_intr_enable(cpsw);
3016 }
3017
3018 /* Resume transmit for every affected interface */
3019 for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
3020 if (slave->ndev && netif_running(slave->ndev))
3021 netif_tx_start_all_queues(slave->ndev);
3022
3023 return 0;
3024}
3025
3026static int cpsw_set_channels(struct net_device *ndev,
3027 struct ethtool_channels *chs)
3028{
3029 struct cpsw_priv *priv = netdev_priv(ndev);
3030 struct cpsw_common *cpsw = priv->cpsw;
3031 struct cpsw_slave *slave;
3032 int i, ret;
3033
3034 ret = cpsw_check_ch_settings(cpsw, chs);
3035 if (ret < 0)
3036 return ret;
3037
3038 cpsw_suspend_data_pass(ndev);
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03003039 ret = cpsw_update_channels(priv, chs);
3040 if (ret)
3041 goto err;
3042
3043 for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
3044 if (!(slave->ndev && netif_running(slave->ndev)))
3045 continue;
3046
3047 /* Inform stack about new count of queues */
3048 ret = netif_set_real_num_tx_queues(slave->ndev,
3049 cpsw->tx_ch_num);
3050 if (ret) {
3051 dev_err(priv->dev, "cannot set real number of tx queues\n");
3052 goto err;
3053 }
3054
3055 ret = netif_set_real_num_rx_queues(slave->ndev,
3056 cpsw->rx_ch_num);
3057 if (ret) {
3058 dev_err(priv->dev, "cannot set real number of rx queues\n");
3059 goto err;
3060 }
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03003061 }
3062
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02003063 if (cpsw->usage_count)
Grygorii Strashko9763a892019-04-26 20:12:26 +03003064 cpsw_split_res(cpsw);
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02003065
Ivan Khoronzhuk022d7ad2017-01-19 18:58:27 +02003066 ret = cpsw_resume_data_pass(ndev);
3067 if (!ret)
3068 return 0;
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03003069err:
3070 dev_err(priv->dev, "cannot update channels number, closing device\n");
3071 dev_close(ndev);
3072 return ret;
3073}
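
/* A channel count change is a full data-path restart: traffic is quiesced
 * with cpsw_suspend_data_pass(), CPDMA channels are created or destroyed to
 * match the request, the real rx/tx queue counts of every slave netdev are
 * updated, the descriptor/rate budget is re-split and the queues restarted
 * with cpsw_resume_data_pass(); any failure closes the device. From user
 * space this is reached through the standard ethtool channels interface,
 * for example:
 *
 *   ethtool -L eth0 rx 4 tx 4
 */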
3074
Yegor Yefremova0909942016-11-28 09:41:33 +01003075static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
3076{
3077 struct cpsw_priv *priv = netdev_priv(ndev);
3078 struct cpsw_common *cpsw = priv->cpsw;
3079 int slave_no = cpsw_slave_index(cpsw, priv);
3080
3081 if (cpsw->slaves[slave_no].phy)
3082 return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
3083 else
3084 return -EOPNOTSUPP;
3085}
3086
3087static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
3088{
3089 struct cpsw_priv *priv = netdev_priv(ndev);
3090 struct cpsw_common *cpsw = priv->cpsw;
3091 int slave_no = cpsw_slave_index(cpsw, priv);
3092
3093 if (cpsw->slaves[slave_no].phy)
3094 return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
3095 else
3096 return -EOPNOTSUPP;
3097}
3098
Yegor Yefremov6bb10c22016-11-28 10:47:52 +01003099static int cpsw_nway_reset(struct net_device *ndev)
3100{
3101 struct cpsw_priv *priv = netdev_priv(ndev);
3102 struct cpsw_common *cpsw = priv->cpsw;
3103 int slave_no = cpsw_slave_index(cpsw, priv);
3104
3105 if (cpsw->slaves[slave_no].phy)
3106 return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
3107 else
3108 return -EOPNOTSUPP;
3109}
3110
Grygorii Strashkobe034fc2017-01-06 14:07:34 -06003111static void cpsw_get_ringparam(struct net_device *ndev,
3112 struct ethtool_ringparam *ering)
3113{
3114 struct cpsw_priv *priv = netdev_priv(ndev);
3115 struct cpsw_common *cpsw = priv->cpsw;
3116
3117 /* not supported */
3118 ering->tx_max_pending = 0;
3119 ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
Ivan Khoronzhukf89d21b2017-01-08 22:12:27 +02003120 ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
Grygorii Strashkobe034fc2017-01-06 14:07:34 -06003121 ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
3122}
3123
3124static int cpsw_set_ringparam(struct net_device *ndev,
3125 struct ethtool_ringparam *ering)
3126{
3127 struct cpsw_priv *priv = netdev_priv(ndev);
3128 struct cpsw_common *cpsw = priv->cpsw;
Ivan Khoronzhuk022d7ad2017-01-19 18:58:27 +02003129 int ret;
Grygorii Strashkobe034fc2017-01-06 14:07:34 -06003130
3131 /* ignore ering->tx_pending - only rx_pending adjustment is supported */
3132
3133 if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
Ivan Khoronzhukf89d21b2017-01-08 22:12:27 +02003134 ering->rx_pending < CPSW_MAX_QUEUES ||
3135 ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
Grygorii Strashkobe034fc2017-01-06 14:07:34 -06003136 return -EINVAL;
3137
3138 if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
3139 return 0;
3140
Ivan Khoronzhuk022d7ad2017-01-19 18:58:27 +02003141 cpsw_suspend_data_pass(ndev);
Grygorii Strashkobe034fc2017-01-06 14:07:34 -06003142
3143 cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
3144
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02003145 if (cpsw->usage_count)
Grygorii Strashkobe034fc2017-01-06 14:07:34 -06003146 cpdma_chan_split_pool(cpsw->dma);
3147
Ivan Khoronzhuk022d7ad2017-01-19 18:58:27 +02003148 ret = cpsw_resume_data_pass(ndev);
3149 if (!ret)
3150 return 0;
Grygorii Strashkobe034fc2017-01-06 14:07:34 -06003151
Ivan Khoronzhuk022d7ad2017-01-19 18:58:27 +02003152 dev_err(&ndev->dev, "cannot set ring params, closing device\n");
Grygorii Strashkobe034fc2017-01-06 14:07:34 -06003153 dev_close(ndev);
3154 return ret;
3155}
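
/* Only the rx descriptor count is tunable through the ring parameters:
 * tx_pending is ignored and rx_pending must lie between CPSW_MAX_QUEUES and
 * descs_pool_size - CPSW_MAX_QUEUES. The change also goes through a
 * suspend/resume of the data path so the descriptor pool can be re-split
 * (for example "ethtool -G eth0 rx 1024").
 */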
3156
Mugunthan V Ndf828592012-03-18 20:17:54 +00003157static const struct ethtool_ops cpsw_ethtool_ops = {
3158 .get_drvinfo = cpsw_get_drvinfo,
3159 .get_msglevel = cpsw_get_msglevel,
3160 .set_msglevel = cpsw_set_msglevel,
3161 .get_link = ethtool_op_get_link,
Richard Cochran2e5b38a2012-10-29 08:45:20 +00003162 .get_ts_info = cpsw_get_ts_info,
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00003163 .get_coalesce = cpsw_get_coalesce,
3164 .set_coalesce = cpsw_set_coalesce,
Mugunthan V Nd9718542013-07-23 15:38:17 +05303165 .get_sset_count = cpsw_get_sset_count,
3166 .get_strings = cpsw_get_strings,
3167 .get_ethtool_stats = cpsw_get_ethtool_stats,
Mugunthan V N1923d6e2014-09-08 22:54:02 +05303168 .get_pauseparam = cpsw_get_pauseparam,
3169 .set_pauseparam = cpsw_set_pauseparam,
Matus Ujhelyid8a64422013-08-20 07:59:38 +02003170 .get_wol = cpsw_get_wol,
3171 .set_wol = cpsw_set_wol,
Mugunthan V N52c4f0e2014-07-22 23:25:07 +05303172 .get_regs_len = cpsw_get_regs_len,
3173 .get_regs = cpsw_get_regs,
Grygorii Strashko7898b1d2016-06-24 21:23:44 +03003174 .begin = cpsw_ethtool_op_begin,
3175 .complete = cpsw_ethtool_op_complete,
Ivan Khoronzhukce52c742016-08-22 21:18:28 +03003176 .get_channels = cpsw_get_channels,
3177 .set_channels = cpsw_set_channels,
Philippe Reynes24798762016-10-08 17:46:15 +02003178 .get_link_ksettings = cpsw_get_link_ksettings,
3179 .set_link_ksettings = cpsw_set_link_ksettings,
Yegor Yefremova0909942016-11-28 09:41:33 +01003180 .get_eee = cpsw_get_eee,
3181 .set_eee = cpsw_set_eee,
Yegor Yefremov6bb10c22016-11-28 10:47:52 +01003182 .nway_reset = cpsw_nway_reset,
Grygorii Strashkobe034fc2017-01-06 14:07:34 -06003183 .get_ringparam = cpsw_get_ringparam,
3184 .set_ringparam = cpsw_set_ringparam,
Mugunthan V Ndf828592012-03-18 20:17:54 +00003185};
3186
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003187static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
Richard Cochran549985e2012-11-14 09:07:56 +00003188 u32 slave_reg_ofs, u32 sliver_reg_ofs)
Mugunthan V Ndf828592012-03-18 20:17:54 +00003189{
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003190 void __iomem *regs = cpsw->regs;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003191 int slave_num = slave->slave_num;
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003192 struct cpsw_slave_data *data = cpsw->data.slave_data + slave_num;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003193
3194 slave->data = data;
Richard Cochran549985e2012-11-14 09:07:56 +00003195 slave->regs = regs + slave_reg_ofs;
3196 slave->sliver = regs + sliver_reg_ofs;
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003197 slave->port_vlan = data->dual_emac_res_vlan;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003198}
3199
David Rivshin552165b2016-04-27 21:25:25 -04003200static int cpsw_probe_dt(struct cpsw_platform_data *data,
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003201 struct platform_device *pdev)
3202{
3203 struct device_node *node = pdev->dev.of_node;
3204 struct device_node *slave_node;
3205 int i = 0, ret;
3206 u32 prop;
3207
3208 if (!node)
3209 return -EINVAL;
3210
3211 if (of_property_read_u32(node, "slaves", &prop)) {
George Cherian88c99ff2014-05-12 10:21:19 +05303212 dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003213 return -EINVAL;
3214 }
3215 data->slaves = prop;
3216
Mugunthan V Ne86ac132013-03-11 23:16:35 +00003217 if (of_property_read_u32(node, "active_slave", &prop)) {
George Cherian88c99ff2014-05-12 10:21:19 +05303218 dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303219 return -EINVAL;
Richard Cochran78ca0b22012-10-29 08:45:18 +00003220 }
Mugunthan V Ne86ac132013-03-11 23:16:35 +00003221 data->active_slave = prop;
Richard Cochran78ca0b22012-10-29 08:45:18 +00003222
Kees Cooka86854d2018-06-12 14:07:58 -07003223 data->slave_data = devm_kcalloc(&pdev->dev,
3224 data->slaves,
3225 sizeof(struct cpsw_slave_data),
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303226 GFP_KERNEL);
Joe Perchesb2adaca2013-02-03 17:43:58 +00003227 if (!data->slave_data)
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303228 return -ENOMEM;
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003229
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003230 if (of_property_read_u32(node, "cpdma_channels", &prop)) {
George Cherian88c99ff2014-05-12 10:21:19 +05303231 dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303232 return -EINVAL;
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003233 }
3234 data->channels = prop;
3235
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003236 if (of_property_read_u32(node, "ale_entries", &prop)) {
George Cherian88c99ff2014-05-12 10:21:19 +05303237 dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303238 return -EINVAL;
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003239 }
3240 data->ale_entries = prop;
3241
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003242 if (of_property_read_u32(node, "bd_ram_size", &prop)) {
George Cherian88c99ff2014-05-12 10:21:19 +05303243 dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303244 return -EINVAL;
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003245 }
3246 data->bd_ram_size = prop;
3247
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003248 if (of_property_read_u32(node, "mac_control", &prop)) {
George Cherian88c99ff2014-05-12 10:21:19 +05303249 dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303250 return -EINVAL;
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003251 }
3252 data->mac_control = prop;
3253
Markus Pargmann281abd92013-10-04 14:44:40 +02003254 if (of_property_read_bool(node, "dual_emac"))
3255 data->dual_emac = 1;
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003256
Vaibhav Hiremath1fb19aa2012-11-14 09:07:55 +00003257 /*
3258 * Populate all the child nodes here...
3259 */
3260 ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
3261	/* We do not want to force this, as some devices may not have any child nodes */
3262 if (ret)
George Cherian88c99ff2014-05-12 10:21:19 +05303263 dev_warn(&pdev->dev, "Doesn't have any child node\n");
Vaibhav Hiremath1fb19aa2012-11-14 09:07:55 +00003264
Ben Hutchings8658aaf2016-06-21 01:16:31 +01003265 for_each_available_child_of_node(node, slave_node) {
Richard Cochran549985e2012-11-14 09:07:56 +00003266 struct cpsw_slave_data *slave_data = data->slave_data + i;
3267 const void *mac_addr = NULL;
Richard Cochran549985e2012-11-14 09:07:56 +00003268 int lenp;
3269 const __be32 *parp;
Richard Cochran549985e2012-11-14 09:07:56 +00003270
Markus Pargmannf468b102013-10-04 14:44:39 +02003271		/* This is not a slave child node, continue */
Rob Herringbf5849f2018-12-05 13:50:32 -06003272 if (!of_node_name_eq(slave_node, "slave"))
Markus Pargmannf468b102013-10-04 14:44:39 +02003273 continue;
3274
Grygorii Strashko3ff18842018-11-25 18:15:25 -06003275 slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
3276 NULL);
3277 if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
3278 IS_ERR(slave_data->ifphy)) {
3279 ret = PTR_ERR(slave_data->ifphy);
3280 dev_err(&pdev->dev,
3281 "%d: Error retrieving port phy: %d\n", i, ret);
3282 return ret;
3283 }
3284
David Rivshin552165b2016-04-27 21:25:25 -04003285 slave_data->phy_node = of_parse_phandle(slave_node,
3286 "phy-handle", 0);
David Rivshinf1eea5c2015-12-16 23:02:10 -05003287 parp = of_get_property(slave_node, "phy_id", &lenp);
David Rivshinae092b52016-04-27 21:38:26 -04003288 if (slave_data->phy_node) {
3289 dev_dbg(&pdev->dev,
Rob Herringf7ce9102017-07-18 16:43:19 -05003290 "slave[%d] using phy-handle=\"%pOF\"\n",
3291 i, slave_data->phy_node);
David Rivshinae092b52016-04-27 21:38:26 -04003292 } else if (of_phy_is_fixed_link(slave_node)) {
David Rivshindfc0a6d2015-12-16 23:02:11 -05003293 /* In the case of a fixed PHY, the DT node associated
3294 * to the PHY is the Ethernet MAC DT node.
3295 */
Markus Brunner1f71e8c2015-11-03 22:09:51 +01003296 ret = of_phy_register_fixed_link(slave_node);
Johan Hovold23a09872016-11-17 17:40:04 +01003297 if (ret) {
3298 if (ret != -EPROBE_DEFER)
3299 dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
Markus Brunner1f71e8c2015-11-03 22:09:51 +01003300 return ret;
Johan Hovold23a09872016-11-17 17:40:04 +01003301 }
David Rivshin06cd6d62016-04-27 21:45:45 -04003302 slave_data->phy_node = of_node_get(slave_node);
David Rivshinf1eea5c2015-12-16 23:02:10 -05003303 } else if (parp) {
3304 u32 phyid;
3305 struct device_node *mdio_node;
3306 struct platform_device *mdio;
3307
3308 if (lenp != (sizeof(__be32) * 2)) {
3309 dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
3310 goto no_phy_slave;
3311 }
3312 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
3313 phyid = be32_to_cpup(parp+1);
3314 mdio = of_find_device_by_node(mdio_node);
3315 of_node_put(mdio_node);
3316 if (!mdio) {
3317 dev_err(&pdev->dev, "Missing mdio platform device\n");
3318 return -EINVAL;
3319 }
3320 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
3321 PHY_ID_FMT, mdio->name, phyid);
Johan Hovold86e1d5a2016-11-17 17:39:59 +01003322 put_device(&mdio->dev);
David Rivshinf1eea5c2015-12-16 23:02:10 -05003323 } else {
David Rivshinae092b52016-04-27 21:38:26 -04003324 dev_err(&pdev->dev,
3325 "No slave[%d] phy_id, phy-handle, or fixed-link property\n",
3326 i);
Markus Brunner1f71e8c2015-11-03 22:09:51 +01003327 goto no_phy_slave;
3328 }
Mugunthan V N47276fc2014-10-24 18:51:33 +05303329 slave_data->phy_if = of_get_phy_mode(slave_node);
3330 if (slave_data->phy_if < 0) {
3331 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
3332 i);
3333 return slave_data->phy_if;
3334 }
3335
3336no_phy_slave:
Richard Cochran549985e2012-11-14 09:07:56 +00003337 mac_addr = of_get_mac_address(slave_node);
Markus Pargmann0ba517b2014-09-29 08:53:17 +02003338 if (mac_addr) {
Richard Cochran549985e2012-11-14 09:07:56 +00003339 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
Markus Pargmann0ba517b2014-09-29 08:53:17 +02003340 } else {
Mugunthan V Nb6745f62015-09-21 15:56:50 +05303341 ret = ti_cm_get_macid(&pdev->dev, i,
3342 slave_data->mac_addr);
3343 if (ret)
3344 return ret;
Markus Pargmann0ba517b2014-09-29 08:53:17 +02003345 }
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003346 if (data->dual_emac) {
Mugunthan V N91c41662013-04-15 07:31:28 +00003347 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003348 &prop)) {
George Cherian88c99ff2014-05-12 10:21:19 +05303349 dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003350 slave_data->dual_emac_res_vlan = i+1;
George Cherian88c99ff2014-05-12 10:21:19 +05303351 dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
3352 slave_data->dual_emac_res_vlan, i);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003353 } else {
3354 slave_data->dual_emac_res_vlan = prop;
3355 }
3356 }
3357
Richard Cochran549985e2012-11-14 09:07:56 +00003358 i++;
Mugunthan V N3a27bfa2013-12-02 12:53:39 +05303359 if (i == data->slaves)
3360 break;
Richard Cochran549985e2012-11-14 09:07:56 +00003361 }
3362
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003363 return 0;
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003364}
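
/* cpsw_probe_dt() above pulls the switch geometry (slaves, active_slave,
 * cpdma_channels, ale_entries, bd_ram_size, mac_control, dual_emac) from
 * the cpsw node and then walks its "slave" children for the PHY description
 * (phy-handle, fixed-link or the legacy phy_id), phy-mode, an optional MAC
 * address and, for dual EMAC, dual_emac_res_vlan. A rough sketch of what it
 * expects (values purely illustrative, see
 * Documentation/devicetree/bindings/net/cpsw.txt for the real binding):
 *
 *   mac: ethernet@4a100000 {
 *           compatible = "ti,cpsw";
 *           slaves = <2>;
 *           active_slave = <0>;
 *           cpdma_channels = <8>;
 *           ale_entries = <1024>;
 *           bd_ram_size = <0x2000>;
 *           mac_control = <0x20>;
 *
 *           cpsw_emac0: slave@200 {
 *                   phy-handle = <&ethphy0>;
 *                   phy-mode = "rgmii-txid";
 *           };
 *   };
 */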
3365
Johan Hovolda4e32b02016-11-17 17:40:00 +01003366static void cpsw_remove_dt(struct platform_device *pdev)
3367{
Johan Hovold8cbcc462016-11-17 17:40:01 +01003368 struct net_device *ndev = platform_get_drvdata(pdev);
3369 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
3370 struct cpsw_platform_data *data = &cpsw->data;
3371 struct device_node *node = pdev->dev.of_node;
3372 struct device_node *slave_node;
3373 int i = 0;
3374
3375 for_each_available_child_of_node(node, slave_node) {
3376 struct cpsw_slave_data *slave_data = &data->slave_data[i];
3377
Rob Herringbf5849f2018-12-05 13:50:32 -06003378 if (!of_node_name_eq(slave_node, "slave"))
Johan Hovold8cbcc462016-11-17 17:40:01 +01003379 continue;
3380
Johan Hovold3f650472016-11-28 19:24:55 +01003381 if (of_phy_is_fixed_link(slave_node))
3382 of_phy_deregister_fixed_link(slave_node);
Johan Hovold8cbcc462016-11-17 17:40:01 +01003383
3384 of_node_put(slave_data->phy_node);
3385
3386 i++;
3387 if (i == data->slaves)
3388 break;
3389 }
3390
Johan Hovolda4e32b02016-11-17 17:40:00 +01003391 of_platform_depopulate(&pdev->dev);
3392}
3393
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03003394static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003395{
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003396 struct cpsw_common *cpsw = priv->cpsw;
3397 struct cpsw_platform_data *data = &cpsw->data;
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003398 struct net_device *ndev;
3399 struct cpsw_priv *priv_sl2;
Ivan Khoronzhuke38b5a32016-08-10 02:22:41 +03003400 int ret = 0;
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003401
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03003402 ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003403 if (!ndev) {
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03003404 dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003405 return -ENOMEM;
3406 }
3407
3408 priv_sl2 = netdev_priv(ndev);
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003409 priv_sl2->cpsw = cpsw;
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003410 priv_sl2->ndev = ndev;
3411 priv_sl2->dev = &ndev->dev;
3412 priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003413
3414 if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
3415 memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
3416 ETH_ALEN);
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03003417 dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
3418 priv_sl2->mac_addr);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003419 } else {
Joe Perches6c1f0a12018-06-22 10:51:00 -07003420 eth_random_addr(priv_sl2->mac_addr);
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03003421 dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
3422 priv_sl2->mac_addr);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003423 }
3424 memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
3425
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003426 priv_sl2->emac_port = 1;
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003427 cpsw->slaves[1].ndev = ndev;
Ivan Khoronzhuk193736c2018-07-27 19:54:39 +03003428 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003429
3430 ndev->netdev_ops = &cpsw_netdev_ops;
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00003431 ndev->ethtool_ops = &cpsw_ethtool_ops;
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003432
3433 /* register the network device */
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03003434 SET_NETDEV_DEV(ndev, cpsw->dev);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003435 ret = register_netdev(ndev);
3436 if (ret) {
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03003437 dev_err(cpsw->dev, "cpsw: error registering net device\n");
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003438 free_netdev(ndev);
3439 ret = -ENODEV;
3440 }
3441
3442 return ret;
3443}
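
/* In dual EMAC mode cpsw_probe_dual_emac() above registers a second
 * net_device that shares the same cpsw_common (CPDMA, ALE, CPTS) but uses
 * emac_port = 1 and slave 1's MAC address (or a random one when that is
 * invalid), so each external port appears as an independent interface.
 */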
3444
Mugunthan V N7da11602015-08-12 15:22:53 +05303445static const struct of_device_id cpsw_of_mtable[] = {
Ivan Khoronzhuk9611d6d2018-05-17 01:21:45 +03003446 { .compatible = "ti,cpsw"},
3447 { .compatible = "ti,am335x-cpsw"},
3448 { .compatible = "ti,am4372-cpsw"},
3449 { .compatible = "ti,dra7-cpsw"},
Mugunthan V N7da11602015-08-12 15:22:53 +05303450 { /* sentinel */ },
3451};
3452MODULE_DEVICE_TABLE(of, cpsw_of_mtable);
3453
Ivan Khoronzhuk9611d6d2018-05-17 01:21:45 +03003454static const struct soc_device_attribute cpsw_soc_devices[] = {
3455 { .family = "AM33xx", .revision = "ES1.0"},
3456 { /* sentinel */ }
3457};
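
/* cpsw_soc_devices matches AM33xx ES1.0 silicon; when it does, probe below
 * sets cpsw->quirk_irq, which restricts the driver to a single rx and a
 * single tx channel (see cpsw_check_ch_settings()) and selects the quirk
 * variants of the NAPI poll handlers.
 */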
3458
Bill Pemberton663e12e2012-12-03 09:23:45 -05003459static int cpsw_probe(struct platform_device *pdev)
Mugunthan V Ndf828592012-03-18 20:17:54 +00003460{
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003461 struct device *dev = &pdev->dev;
Ivan Khoronzhukef4183a2016-08-10 02:22:35 +03003462 struct clk *clk;
Sebastian Siewiord1bd9ac2013-04-24 08:48:23 +00003463 struct cpsw_platform_data *data;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003464 struct net_device *ndev;
3465 struct cpsw_priv *priv;
3466 struct cpdma_params dma_params;
3467 struct cpsw_ale_params ale_params;
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303468 void __iomem *ss_regs;
Grygorii Strashko8a2c9a52016-12-06 18:00:41 -06003469 void __iomem *cpts_regs;
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303470 struct resource *res, *ss_res;
Mugunthan V N1d147cc2015-09-07 15:16:44 +05303471 struct gpio_descs *mode;
Richard Cochran549985e2012-11-14 09:07:56 +00003472 u32 slave_offset, sliver_offset, slave_size;
Ivan Khoronzhuk9611d6d2018-05-17 01:21:45 +03003473 const struct soc_device_attribute *soc;
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03003474 struct cpsw_common *cpsw;
Ivan Khoronzhuk79b33252018-07-24 00:26:29 +03003475 int ret = 0, i, ch;
Felipe Balbi5087b912015-01-16 10:11:11 -06003476 int irq;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003477
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003478 cpsw = devm_kzalloc(dev, sizeof(struct cpsw_common), GFP_KERNEL);
Johan Hovold3420ea82016-11-17 17:40:03 +01003479 if (!cpsw)
3480 return -ENOMEM;
3481
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003482 cpsw->dev = dev;
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03003483
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03003484 ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003485 if (!ndev) {
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003486 dev_err(dev, "error allocating net_device\n");
Mugunthan V Ndf828592012-03-18 20:17:54 +00003487 return -ENOMEM;
3488 }
3489
3490 platform_set_drvdata(pdev, ndev);
3491 priv = netdev_priv(ndev);
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03003492 priv->cpsw = cpsw;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003493 priv->ndev = ndev;
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003494 priv->dev = dev;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003495 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03003496 cpsw->rx_packet_max = max(rx_packet_max, 128);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003497
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003498 mode = devm_gpiod_get_array_optional(dev, "mode", GPIOD_OUT_LOW);
Mugunthan V N1d147cc2015-09-07 15:16:44 +05303499 if (IS_ERR(mode)) {
3500 ret = PTR_ERR(mode);
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003501 dev_err(dev, "gpio request failed, ret %d\n", ret);
Mugunthan V N1d147cc2015-09-07 15:16:44 +05303502 goto clean_ndev_ret;
3503 }
3504
Vaibhav Hiremath1fb19aa2012-11-14 09:07:55 +00003505 /*
3506 * This may be required here for child devices.
3507 */
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003508 pm_runtime_enable(dev);
Vaibhav Hiremath1fb19aa2012-11-14 09:07:55 +00003509
Mugunthan V N739683b2013-06-06 23:45:14 +05303510 /* Select default pin state */
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003511 pinctrl_pm_select_default_state(dev);
Mugunthan V N739683b2013-06-06 23:45:14 +05303512
Johan Hovolda4e32b02016-11-17 17:40:00 +01003513 /* Need to enable clocks with runtime PM api to access module
3514 * registers
3515 */
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003516 ret = pm_runtime_get_sync(dev);
Johan Hovolda4e32b02016-11-17 17:40:00 +01003517 if (ret < 0) {
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003518 pm_runtime_put_noidle(dev);
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303519 goto clean_runtime_disable_ret;
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003520 }
Johan Hovolda4e32b02016-11-17 17:40:00 +01003521
Johan Hovold23a09872016-11-17 17:40:04 +01003522 ret = cpsw_probe_dt(&cpsw->data, pdev);
3523 if (ret)
Johan Hovolda4e32b02016-11-17 17:40:00 +01003524 goto clean_dt_ret;
Johan Hovold23a09872016-11-17 17:40:04 +01003525
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003526 data = &cpsw->data;
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03003527 cpsw->rx_ch_num = 1;
3528 cpsw->tx_ch_num = 1;
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003529
Mugunthan V Ndf828592012-03-18 20:17:54 +00003530 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
3531 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003532 dev_info(dev, "Detected MACID = %pM\n", priv->mac_addr);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003533 } else {
Joe Perches7efd26d2012-07-12 19:33:06 +00003534 eth_random_addr(priv->mac_addr);
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003535 dev_info(dev, "Random MACID = %pM\n", priv->mac_addr);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003536 }
3537
3538 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
3539
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003540 cpsw->slaves = devm_kcalloc(dev,
Kees Cooka86854d2018-06-12 14:07:58 -07003541 data->slaves, sizeof(struct cpsw_slave),
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303542 GFP_KERNEL);
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003543 if (!cpsw->slaves) {
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303544 ret = -ENOMEM;
Johan Hovolda4e32b02016-11-17 17:40:00 +01003545 goto clean_dt_ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003546 }
3547 for (i = 0; i < data->slaves; i++)
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003548 cpsw->slaves[i].slave_num = i;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003549
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003550 cpsw->slaves[0].ndev = ndev;
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003551 priv->emac_port = 0;
3552
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003553 clk = devm_clk_get(dev, "fck");
Ivan Khoronzhukef4183a2016-08-10 02:22:35 +03003554 if (IS_ERR(clk)) {
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003555 dev_err(dev, "fck is not found\n");
Mugunthan V Nf150bd72012-07-17 08:09:50 +00003556 ret = -ENODEV;
Johan Hovolda4e32b02016-11-17 17:40:00 +01003557 goto clean_dt_ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003558 }
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03003559 cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003560
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303561 ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003562 ss_regs = devm_ioremap_resource(dev, ss_res);
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303563 if (IS_ERR(ss_regs)) {
3564 ret = PTR_ERR(ss_regs);
Johan Hovolda4e32b02016-11-17 17:40:00 +01003565 goto clean_dt_ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003566 }
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003567 cpsw->regs = ss_regs;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003568
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03003569 cpsw->version = readl(&cpsw->regs->id_ver);
Mugunthan V Nf280e892013-12-11 22:09:05 -06003570
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303571 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003572 cpsw->wr_regs = devm_ioremap_resource(dev, res);
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003573 if (IS_ERR(cpsw->wr_regs)) {
3574 ret = PTR_ERR(cpsw->wr_regs);
Johan Hovolda4e32b02016-11-17 17:40:00 +01003575 goto clean_dt_ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003576 }
Mugunthan V Ndf828592012-03-18 20:17:54 +00003577
3578 memset(&dma_params, 0, sizeof(dma_params));
Richard Cochran549985e2012-11-14 09:07:56 +00003579 memset(&ale_params, 0, sizeof(ale_params));
3580
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03003581 switch (cpsw->version) {
Richard Cochran549985e2012-11-14 09:07:56 +00003582 case CPSW_VERSION_1:
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003583 cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
Grygorii Strashko8a2c9a52016-12-06 18:00:41 -06003584 cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003585 cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
Richard Cochran549985e2012-11-14 09:07:56 +00003586 dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
3587 dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
3588 ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
3589 slave_offset = CPSW1_SLAVE_OFFSET;
3590 slave_size = CPSW1_SLAVE_SIZE;
3591 sliver_offset = CPSW1_SLIVER_OFFSET;
3592 dma_params.desc_mem_phys = 0;
3593 break;
3594 case CPSW_VERSION_2:
Mugunthan V Nc193f362013-08-05 17:30:05 +05303595 case CPSW_VERSION_3:
Mugunthan V N926489b2013-08-12 17:11:15 +05303596 case CPSW_VERSION_4:
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003597 cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
Grygorii Strashko8a2c9a52016-12-06 18:00:41 -06003598 cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003599 cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
Richard Cochran549985e2012-11-14 09:07:56 +00003600 dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
3601 dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
3602 ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
3603 slave_offset = CPSW2_SLAVE_OFFSET;
3604 slave_size = CPSW2_SLAVE_SIZE;
3605 sliver_offset = CPSW2_SLIVER_OFFSET;
3606 dma_params.desc_mem_phys =
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303607 (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
Richard Cochran549985e2012-11-14 09:07:56 +00003608 break;
3609 default:
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003610 dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
Richard Cochran549985e2012-11-14 09:07:56 +00003611 ret = -ENODEV;
Johan Hovolda4e32b02016-11-17 17:40:00 +01003612 goto clean_dt_ret;
Richard Cochran549985e2012-11-14 09:07:56 +00003613 }
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003614 for (i = 0; i < cpsw->data.slaves; i++) {
3615 struct cpsw_slave *slave = &cpsw->slaves[i];
3616
3617 cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
Richard Cochran549985e2012-11-14 09:07:56 +00003618 slave_offset += slave_size;
3619 sliver_offset += SLIVER_SIZE;
3620 }
3621
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003622 dma_params.dev = dev;
Richard Cochran549985e2012-11-14 09:07:56 +00003623 dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
3624 dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
3625 dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
3626 dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
3627 dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003628
3629 dma_params.num_chan = data->channels;
3630 dma_params.has_soft_reset = true;
3631 dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
3632 dma_params.desc_mem_size = data->bd_ram_size;
3633 dma_params.desc_align = 16;
3634 dma_params.has_ext_regs = true;
Richard Cochran549985e2012-11-14 09:07:56 +00003635 dma_params.desc_hw_addr = dma_params.desc_mem_phys;
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02003636 dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
Grygorii Strashko90225bf2017-01-06 14:07:33 -06003637 dma_params.descs_pool_size = descs_pool_size;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003638
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03003639 cpsw->dma = cpdma_ctlr_create(&dma_params);
3640 if (!cpsw->dma) {
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003641 dev_err(dev, "error initializing dma\n");
Mugunthan V Ndf828592012-03-18 20:17:54 +00003642 ret = -ENOMEM;
Johan Hovolda4e32b02016-11-17 17:40:00 +01003643 goto clean_dt_ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003644 }
3645
Ivan Khoronzhuk9611d6d2018-05-17 01:21:45 +03003646 soc = soc_device_match(cpsw_soc_devices);
3647 if (soc)
3648 cpsw->quirk_irq = 1;
3649
Ivan Khoronzhuk79b33252018-07-24 00:26:29 +03003650 ch = cpsw->quirk_irq ? 0 : 7;
3651 cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
Ivan Khoronzhuk8a83c5d2017-12-12 23:06:35 +02003652 if (IS_ERR(cpsw->txv[0].ch)) {
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003653 dev_err(dev, "error initializing tx dma channel\n");
Ivan Khoronzhuk8a83c5d2017-12-12 23:06:35 +02003654 ret = PTR_ERR(cpsw->txv[0].ch);
3655 goto clean_dma_ret;
3656 }
3657
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02003658 cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
Ivan Khoronzhuk8a83c5d2017-12-12 23:06:35 +02003659 if (IS_ERR(cpsw->rxv[0].ch)) {
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003660 dev_err(dev, "error initializing rx dma channel\n");
Ivan Khoronzhuk8a83c5d2017-12-12 23:06:35 +02003661 ret = PTR_ERR(cpsw->rxv[0].ch);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003662 goto clean_dma_ret;
3663 }
3664
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003665 ale_params.dev = dev;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003666 ale_params.ale_ageout = ale_ageout;
3667 ale_params.ale_entries = data->ale_entries;
Grygorii Strashkoc6395f12017-11-30 18:21:14 -06003668 ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003669
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03003670 cpsw->ale = cpsw_ale_create(&ale_params);
3671 if (!cpsw->ale) {
Grygorii Strashkoc8fb5662019-04-26 20:12:27 +03003672 dev_err(dev, "error initializing ale engine\n");
Mugunthan V Ndf828592012-03-18 20:17:54 +00003673 ret = -ENODEV;
3674 goto clean_dma_ret;
3675 }
3676
	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
	if (IS_ERR(cpsw->cpts)) {
		ret = PTR_ERR(cpsw->cpts);
		goto clean_dma_ret;
	}

	ndev->irq = platform_get_irq(pdev, 1);
	if (ndev->irq < 0) {
		dev_err(dev, "error getting irq resource\n");
		ret = ndev->irq;
		goto clean_dma_ret;
	}

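	/* Advertise hardware VLAN tag filtering and RX VLAN acceleration */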
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;

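	/* Hook up netdev/ethtool ops and the RX/TX NAPI contexts; the poll
	 * callbacks differ depending on whether quirk_irq (single-channel)
	 * mode is in use.
	 */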
	ndev->netdev_ops = &cpsw_netdev_ops;
	ndev->ethtool_ops = &cpsw_ethtool_ops;
	netif_napi_add(ndev, &cpsw->napi_rx,
		       cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
		       CPSW_POLL_WEIGHT);
	netif_tx_napi_add(ndev, &cpsw->napi_tx,
			  cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
			  CPSW_POLL_WEIGHT);
	cpsw_split_res(cpsw);

	/* register the network device */
	SET_NETDEV_DEV(ndev, dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(dev, "error registering net device\n");
		ret = -ENODEV;
		goto clean_dma_ret;
	}

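	/* In dual EMAC mode each slave port is a separate network device;
	 * create and register the second one here.
	 */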
	if (cpsw->data.dual_emac) {
		ret = cpsw_probe_dual_emac(priv);
		if (ret) {
			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
			goto clean_unregister_netdev_ret;
		}
	}

	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
	 * MISC IRQs which are always kept disabled with this driver so
	 * we will not request them.
	 *
	 * If anyone wants to implement support for those, make sure to
	 * first request and append them to irqs_table array.
	 */

	/* RX IRQ */
	irq = platform_get_irq(pdev, 1);
	if (irq < 0) {
		ret = irq;
		goto clean_dma_ret;
	}

	cpsw->irqs_table[0] = irq;
	ret = devm_request_irq(dev, irq, cpsw_rx_interrupt,
			       0, dev_name(dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_dma_ret;
	}

	/* TX IRQ */
	irq = platform_get_irq(pdev, 2);
	if (irq < 0) {
		ret = irq;
		goto clean_dma_ret;
	}

	cpsw->irqs_table[1] = irq;
	ret = devm_request_irq(dev, irq, cpsw_tx_interrupt,
			       0, dev_name(dev), cpsw);
	if (ret < 0) {
		dev_err(dev, "error attaching irq (%d)\n", ret);
		goto clean_dma_ret;
	}

	cpsw_notice(priv, probe,
		    "initialized device (regs %pa, irq %d, pool size %d)\n",
		    &ss_res->start, ndev->irq, dma_params.descs_pool_size);

	pm_runtime_put(&pdev->dev);

	return 0;

clean_unregister_netdev_ret:
	unregister_netdev(ndev);
clean_dma_ret:
	cpdma_ctlr_destroy(cpsw->dma);
clean_dt_ret:
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
clean_runtime_disable_ret:
	pm_runtime_disable(&pdev->dev);
clean_ndev_ret:
	free_netdev(priv->ndev);
	return ret;
}

static int cpsw_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

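	/* Unregister the netdevs first, then release CPTS, CPDMA and
	 * DT-created children before dropping the runtime PM references
	 * and freeing the net_device structures.
	 */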
	if (cpsw->data.dual_emac)
		unregister_netdev(cpsw->slaves[1].ndev);
	unregister_netdev(ndev);

	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (cpsw->data.dual_emac)
		free_netdev(cpsw->slaves[1].ndev);
	free_netdev(ndev);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int cpsw_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

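	/* Stop all running interfaces so the hardware is quiescent before
	 * the pins are switched to their sleep state.
	 */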
	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_stop(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_stop(ndev);
	}

	/* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int cpsw_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* Select default pin state */
	pinctrl_pm_select_default_state(dev);

	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
	rtnl_lock();
	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_open(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_open(ndev);
	}
	rtnl_unlock();

	return 0;
}
#endif

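/* cpsw_suspend/cpsw_resume only exist under CONFIG_PM_SLEEP;
 * SIMPLE_DEV_PM_OPS references them only in that configuration,
 * matching the #ifdef block above.
 */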
static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);

static struct platform_driver cpsw_driver = {
	.driver = {
		.name = "cpsw",
		.pm = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

module_platform_driver(cpsw_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");