/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2012 Texas Instruments
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/timer.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/irqreturn.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/phy.h>
#include <linux/phy/phy.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/sys_soc.h>

#include <linux/pinctrl/consumer.h>
#include <net/pkt_cls.h>

#include "cpsw.h"
#include "cpsw_ale.h"
#include "cpts.h"
#include "davinci_cpdma.h"

#include <net/pkt_sched.h>

#define CPSW_DEBUG	(NETIF_MSG_HW		| NETIF_MSG_WOL		| \
			 NETIF_MSG_DRV		| NETIF_MSG_LINK	| \
			 NETIF_MSG_IFUP		| NETIF_MSG_INTR	| \
			 NETIF_MSG_PROBE	| NETIF_MSG_TIMER	| \
			 NETIF_MSG_IFDOWN	| NETIF_MSG_RX_ERR	| \
			 NETIF_MSG_TX_ERR	| NETIF_MSG_TX_DONE	| \
			 NETIF_MSG_PKTDATA	| NETIF_MSG_TX_QUEUED	| \
			 NETIF_MSG_RX_STATUS)

#define cpsw_info(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_info(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_err(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_err(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_dbg(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_dbg(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define cpsw_notice(priv, type, format, ...)			\
do {								\
	if (netif_msg_##type(priv) && net_ratelimit())		\
		dev_notice(priv->dev, format, ## __VA_ARGS__);	\
} while (0)

#define ALE_ALL_PORTS		0x7

#define CPSW_MAJOR_VERSION(reg)		(reg >> 8 & 0x7)
#define CPSW_MINOR_VERSION(reg)		(reg & 0xff)
#define CPSW_RTL_VERSION(reg)		((reg >> 11) & 0x1f)

#define CPSW_VERSION_1		0x19010a
#define CPSW_VERSION_2		0x19010c
#define CPSW_VERSION_3		0x19010f
#define CPSW_VERSION_4		0x190112

#define HOST_PORT_NUM		0
#define CPSW_ALE_PORTS_NUM	3
#define SLIVER_SIZE		0x40

#define CPSW1_HOST_PORT_OFFSET	0x028
#define CPSW1_SLAVE_OFFSET	0x050
#define CPSW1_SLAVE_SIZE	0x040
#define CPSW1_CPDMA_OFFSET	0x100
#define CPSW1_STATERAM_OFFSET	0x200
#define CPSW1_HW_STATS		0x400
#define CPSW1_CPTS_OFFSET	0x500
#define CPSW1_ALE_OFFSET	0x600
#define CPSW1_SLIVER_OFFSET	0x700

#define CPSW2_HOST_PORT_OFFSET	0x108
#define CPSW2_SLAVE_OFFSET	0x200
#define CPSW2_SLAVE_SIZE	0x100
#define CPSW2_CPDMA_OFFSET	0x800
#define CPSW2_HW_STATS		0x900
#define CPSW2_STATERAM_OFFSET	0xa00
#define CPSW2_CPTS_OFFSET	0xc00
#define CPSW2_ALE_OFFSET	0xd00
#define CPSW2_SLIVER_OFFSET	0xd80
#define CPSW2_BD_OFFSET		0x2000

#define CPDMA_RXTHRESH		0x0c0
#define CPDMA_RXFREE		0x0e0
#define CPDMA_TXHDP		0x00
#define CPDMA_RXHDP		0x20
#define CPDMA_TXCP		0x40
#define CPDMA_RXCP		0x60

#define CPSW_POLL_WEIGHT	64
#define CPSW_RX_VLAN_ENCAP_HDR_SIZE	4
#define CPSW_MIN_PACKET_SIZE	(VLAN_ETH_ZLEN)
#define CPSW_MAX_PACKET_SIZE	(VLAN_ETH_FRAME_LEN +\
				 ETH_FCS_LEN +\
				 CPSW_RX_VLAN_ENCAP_HDR_SIZE)

#define RX_PRIORITY_MAPPING	0x76543210
#define TX_PRIORITY_MAPPING	0x33221100
#define CPDMA_TX_PRIORITY_MAP	0x76543210

#define CPSW_VLAN_AWARE		BIT(1)
#define CPSW_RX_VLAN_ENCAP	BIT(2)
#define CPSW_ALE_VLAN_AWARE	1

#define CPSW_FIFO_NORMAL_MODE		(0 << 16)
#define CPSW_FIFO_DUAL_MAC_MODE		(1 << 16)
#define CPSW_FIFO_RATE_LIMIT_MODE	(2 << 16)

#define CPSW_INTPACEEN		(0x3f << 16)
#define CPSW_INTPRESCALE_MASK	(0x7FF << 0)
#define CPSW_CMINTMAX_CNT	63
#define CPSW_CMINTMIN_CNT	2
#define CPSW_CMINTMAX_INTVL	(1000 / CPSW_CMINTMIN_CNT)
#define CPSW_CMINTMIN_INTVL	((1000 / CPSW_CMINTMAX_CNT) + 1)

#define cpsw_slave_index(cpsw, priv)				\
		((cpsw->data.dual_emac) ? priv->emac_port :	\
		cpsw->data.active_slave)
#define IRQ_NUM			2
#define CPSW_MAX_QUEUES		8
#define CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT 256
#define CPSW_FIFO_QUEUE_TYPE_SHIFT	16
#define CPSW_FIFO_SHAPE_EN_SHIFT	16
#define CPSW_FIFO_RATE_EN_SHIFT		20
#define CPSW_TC_NUM			4
#define CPSW_FIFO_SHAPERS_NUM		(CPSW_TC_NUM - 1)
#define CPSW_PCT_MASK			0x7f

#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT	29
#define CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK		GENMASK(2, 0)
#define CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT	16
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT	8
#define CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK	GENMASK(1, 0)
enum {
	CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG = 0,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG,
	CPSW_RX_VLAN_ENCAP_HDR_PKT_UNTAG,
};

static int debug_level;
module_param(debug_level, int, 0);
MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");

static int ale_ageout = 10;
module_param(ale_ageout, int, 0);
MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");

static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
module_param(rx_packet_max, int, 0);
MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");

static int descs_pool_size = CPSW_CPDMA_DESCS_POOL_SIZE_DEFAULT;
module_param(descs_pool_size, int, 0444);
MODULE_PARM_DESC(descs_pool_size, "Number of CPDMA CPPI descriptors in pool");

struct cpsw_wr_regs {
	u32	id_ver;
	u32	soft_reset;
	u32	control;
	u32	int_control;
	u32	rx_thresh_en;
	u32	rx_en;
	u32	tx_en;
	u32	misc_en;
	u32	mem_allign1[8];
	u32	rx_thresh_stat;
	u32	rx_stat;
	u32	tx_stat;
	u32	misc_stat;
	u32	mem_allign2[8];
	u32	rx_imax;
	u32	tx_imax;

};

struct cpsw_ss_regs {
	u32	id_ver;
	u32	control;
	u32	soft_reset;
	u32	stat_port_en;
	u32	ptype;
	u32	soft_idle;
	u32	thru_rate;
	u32	gap_thresh;
	u32	tx_start_wds;
	u32	flow_control;
	u32	vlan_ltype;
	u32	ts_ltype;
	u32	dlr_ltype;
};

/* CPSW_PORT_V1 */
#define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
#define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
#define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
#define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
#define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW1_TS_CTL        0x14 /* Time Sync Control */
#define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
#define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */

/* CPSW_PORT_V2 */
#define CPSW2_CONTROL       0x00 /* Control Register */
#define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
#define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
#define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
#define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
#define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
#define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */

/* CPSW_PORT_V1 and V2 */
#define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
#define SA_HI               0x24 /* CPGMAC_SL Source Address High */
#define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */

/* CPSW_PORT_V2 only */
#define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
#define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */

/* Bit definitions for the CPSW2_CONTROL register */
#define PASS_PRI_TAGGED     BIT(24) /* Pass Priority Tagged */
#define VLAN_LTYPE2_EN      BIT(21) /* VLAN LTYPE 2 enable */
#define VLAN_LTYPE1_EN      BIT(20) /* VLAN LTYPE 1 enable */
#define DSCP_PRI_EN         BIT(16) /* DSCP Priority Enable */
#define TS_107              BIT(15) /* Time Sync Dest IP Address 107 */
#define TS_320              BIT(14) /* Time Sync Dest Port 320 enable */
#define TS_319              BIT(13) /* Time Sync Dest Port 319 enable */
#define TS_132              BIT(12) /* Time Sync Dest IP Addr 132 enable */
#define TS_131              BIT(11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130              BIT(10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129              BIT(9)  /* Time Sync Dest IP Addr 129 enable */
#define TS_TTL_NONZERO      BIT(8)  /* Time Sync Time To Live Non-zero enable */
#define TS_ANNEX_F_EN       BIT(6)  /* Time Sync Annex F enable */
#define TS_ANNEX_D_EN       BIT(4)  /* Time Sync Annex D enable */
#define TS_LTYPE2_EN        BIT(3)  /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN        BIT(2)  /* Time Sync LTYPE 1 enable */
#define TS_TX_EN            BIT(1)  /* Time Sync Transmit Enable */
#define TS_RX_EN            BIT(0)  /* Time Sync Receive Enable */

#define CTRL_V2_TS_BITS \
	(TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO  | TS_ANNEX_D_EN | TS_LTYPE1_EN)

#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V2_TX_TS_BITS  (CTRL_V2_TS_BITS | TS_TX_EN)
#define CTRL_V2_RX_TS_BITS  (CTRL_V2_TS_BITS | TS_RX_EN)


#define CTRL_V3_TS_BITS \
	(TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
	 TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
	 TS_LTYPE1_EN)

#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
#define CTRL_V3_TX_TS_BITS  (CTRL_V3_TS_BITS | TS_TX_EN)
#define CTRL_V3_RX_TS_BITS  (CTRL_V3_TS_BITS | TS_RX_EN)

/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
#define TS_SEQ_ID_OFFSET_MASK    (0x3f)
#define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
#define TS_MSG_TYPE_EN_MASK      (0xffff)

/* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
#define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))

/* Bit definitions for the CPSW1_TS_CTL register */
#define CPSW_V1_TS_RX_EN		BIT(0)
#define CPSW_V1_TS_TX_EN		BIT(4)
#define CPSW_V1_MSG_TYPE_OFS		16

/* Bit definitions for the CPSW1_TS_SEQ_LTYPE register */
#define CPSW_V1_SEQ_ID_OFS_SHIFT	16

#define CPSW_MAX_BLKS_TX		15
#define CPSW_MAX_BLKS_TX_SHIFT		4
#define CPSW_MAX_BLKS_RX		5

struct cpsw_host_regs {
	u32	max_blks;
	u32	blk_cnt;
	u32	tx_in_ctl;
	u32	port_vlan;
	u32	tx_pri_map;
	u32	cpdma_tx_pri_map;
	u32	cpdma_rx_chan_map;
};

struct cpsw_sliver_regs {
	u32	id_ver;
	u32	mac_control;
	u32	mac_status;
	u32	soft_reset;
	u32	rx_maxlen;
	u32	__reserved_0;
	u32	rx_pause;
	u32	tx_pause;
	u32	__reserved_1;
	u32	rx_pri_map;
};

struct cpsw_hw_stats {
	u32	rxgoodframes;
	u32	rxbroadcastframes;
	u32	rxmulticastframes;
	u32	rxpauseframes;
	u32	rxcrcerrors;
	u32	rxaligncodeerrors;
	u32	rxoversizedframes;
	u32	rxjabberframes;
	u32	rxundersizedframes;
	u32	rxfragments;
	u32	__pad_0[2];
	u32	rxoctets;
	u32	txgoodframes;
	u32	txbroadcastframes;
	u32	txmulticastframes;
	u32	txpauseframes;
	u32	txdeferredframes;
	u32	txcollisionframes;
	u32	txsinglecollframes;
	u32	txmultcollframes;
	u32	txexcessivecollisions;
	u32	txlatecollisions;
	u32	txunderrun;
	u32	txcarriersenseerrors;
	u32	txoctets;
	u32	octetframes64;
	u32	octetframes65t127;
	u32	octetframes128t255;
	u32	octetframes256t511;
	u32	octetframes512t1023;
	u32	octetframes1024tup;
	u32	netoctets;
	u32	rxsofoverruns;
	u32	rxmofoverruns;
	u32	rxdmaoverruns;
};

struct cpsw_slave_data {
	struct device_node *phy_node;
	char		phy_id[MII_BUS_ID_SIZE];
	int		phy_if;
	u8		mac_addr[ETH_ALEN];
	u16		dual_emac_res_vlan;	/* Reserved VLAN for DualEMAC */
	struct phy	*ifphy;
};

struct cpsw_platform_data {
	struct cpsw_slave_data	*slave_data;
	u32	ss_reg_ofs;	/* Subsystem control register offset */
	u32	channels;	/* number of cpdma channels (symmetric) */
	u32	slaves;		/* number of slave cpgmac ports */
	u32	active_slave;	/* time stamping, ethtool and SIOCGMIIPHY slave */
	u32	ale_entries;	/* ale table size */
	u32	bd_ram_size;	/* buffer descriptor ram size */
	u32	mac_control;	/* Mac control register */
	u16	default_vlan;	/* Def VLAN for ALE lookup in VLAN aware mode */
	bool	dual_emac;	/* Enable Dual EMAC mode */
};

struct cpsw_slave {
	void __iomem			*regs;
	struct cpsw_sliver_regs __iomem	*sliver;
	int				slave_num;
	u32				mac_control;
	struct cpsw_slave_data		*data;
	struct phy_device		*phy;
	struct net_device		*ndev;
	u32				port_vlan;
};

static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
{
	return readl_relaxed(slave->regs + offset);
}

static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
{
	writel_relaxed(val, slave->regs + offset);
}

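/* A cpsw_vector pairs a CPDMA channel with the NAPI budget assigned to it by
 * cpsw_split_res(), so per-channel rate limits translate into polling weight.
 */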
struct cpsw_vector {
	struct cpdma_chan *ch;
	int budget;
};

struct cpsw_common {
	struct device			*dev;
	struct cpsw_platform_data	data;
	struct napi_struct		napi_rx;
	struct napi_struct		napi_tx;
	struct cpsw_ss_regs __iomem	*regs;
	struct cpsw_wr_regs __iomem	*wr_regs;
	u8 __iomem			*hw_stats;
	struct cpsw_host_regs __iomem	*host_port_regs;
	u32				version;
	u32				coal_intvl;
	u32				bus_freq_mhz;
	int				rx_packet_max;
	struct cpsw_slave		*slaves;
	struct cpdma_ctlr		*dma;
	struct cpsw_vector		txv[CPSW_MAX_QUEUES];
	struct cpsw_vector		rxv[CPSW_MAX_QUEUES];
	struct cpsw_ale			*ale;
	bool				quirk_irq;
	bool				rx_irq_disabled;
	bool				tx_irq_disabled;
	u32 				irqs_table[IRQ_NUM];
	struct cpts			*cpts;
	int				rx_ch_num, tx_ch_num;
	int				speed;
	int				usage_count;
};

struct cpsw_priv {
	struct net_device		*ndev;
	struct device			*dev;
	u32				msg_enable;
	u8				mac_addr[ETH_ALEN];
	bool				rx_pause;
	bool				tx_pause;
	bool				mqprio_hw;
	int				fifo_bw[CPSW_TC_NUM];
	int				shp_cfg_speed;
	u32 emac_port;
	struct cpsw_common *cpsw;
};

struct cpsw_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

enum {
	CPSW_STATS,
	CPDMA_RX_STATS,
	CPDMA_TX_STATS,
};

#define CPSW_STAT(m)		CPSW_STATS, \
				FIELD_SIZEOF(struct cpsw_hw_stats, m), \
				offsetof(struct cpsw_hw_stats, m)
#define CPDMA_RX_STAT(m)	CPDMA_RX_STATS, \
				FIELD_SIZEOF(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)
#define CPDMA_TX_STAT(m)	CPDMA_TX_STATS, \
				FIELD_SIZEOF(struct cpdma_chan_stats, m), \
				offsetof(struct cpdma_chan_stats, m)

static const struct cpsw_stats cpsw_gstrings_stats[] = {
	{ "Good Rx Frames", CPSW_STAT(rxgoodframes) },
	{ "Broadcast Rx Frames", CPSW_STAT(rxbroadcastframes) },
	{ "Multicast Rx Frames", CPSW_STAT(rxmulticastframes) },
	{ "Pause Rx Frames", CPSW_STAT(rxpauseframes) },
	{ "Rx CRC Errors", CPSW_STAT(rxcrcerrors) },
	{ "Rx Align/Code Errors", CPSW_STAT(rxaligncodeerrors) },
	{ "Oversize Rx Frames", CPSW_STAT(rxoversizedframes) },
	{ "Rx Jabbers", CPSW_STAT(rxjabberframes) },
	{ "Undersize (Short) Rx Frames", CPSW_STAT(rxundersizedframes) },
	{ "Rx Fragments", CPSW_STAT(rxfragments) },
	{ "Rx Octets", CPSW_STAT(rxoctets) },
	{ "Good Tx Frames", CPSW_STAT(txgoodframes) },
	{ "Broadcast Tx Frames", CPSW_STAT(txbroadcastframes) },
	{ "Multicast Tx Frames", CPSW_STAT(txmulticastframes) },
	{ "Pause Tx Frames", CPSW_STAT(txpauseframes) },
	{ "Deferred Tx Frames", CPSW_STAT(txdeferredframes) },
	{ "Collisions", CPSW_STAT(txcollisionframes) },
	{ "Single Collision Tx Frames", CPSW_STAT(txsinglecollframes) },
	{ "Multiple Collision Tx Frames", CPSW_STAT(txmultcollframes) },
	{ "Excessive Collisions", CPSW_STAT(txexcessivecollisions) },
	{ "Late Collisions", CPSW_STAT(txlatecollisions) },
	{ "Tx Underrun", CPSW_STAT(txunderrun) },
	{ "Carrier Sense Errors", CPSW_STAT(txcarriersenseerrors) },
	{ "Tx Octets", CPSW_STAT(txoctets) },
	{ "Rx + Tx 64 Octet Frames", CPSW_STAT(octetframes64) },
	{ "Rx + Tx 65-127 Octet Frames", CPSW_STAT(octetframes65t127) },
	{ "Rx + Tx 128-255 Octet Frames", CPSW_STAT(octetframes128t255) },
	{ "Rx + Tx 256-511 Octet Frames", CPSW_STAT(octetframes256t511) },
	{ "Rx + Tx 512-1023 Octet Frames", CPSW_STAT(octetframes512t1023) },
	{ "Rx + Tx 1024-Up Octet Frames", CPSW_STAT(octetframes1024tup) },
	{ "Net Octets", CPSW_STAT(netoctets) },
	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
};

static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
	{ "misqueued", CPDMA_RX_STAT(misqueued) },
	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
	{ "requeue", CPDMA_RX_STAT(requeue) },
	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
};

#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)

#define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
#define napi_to_cpsw(napi)	container_of(napi, struct cpsw_common, napi)
#define for_each_slave(priv, func, arg...)				\
	do {								\
		struct cpsw_slave *slave;				\
		struct cpsw_common *cpsw = (priv)->cpsw;		\
		int n;							\
		if (cpsw->data.dual_emac)				\
			(func)((cpsw)->slaves + priv->emac_port, ##arg);\
		else							\
			for (n = cpsw->data.slaves,			\
			     slave = cpsw->slaves;			\
			     n; n--)					\
				(func)(slave++, ##arg);			\
	} while (0)

static inline int cpsw_get_slave_port(u32 slave_num)
{
	return slave_num + 1;
}

static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr)
{
	struct cpsw_common *cpsw = priv->cpsw;

	if (cpsw->data.dual_emac) {
		struct cpsw_slave *slave = cpsw->slaves + priv->emac_port;

		cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST,
				   ALE_VLAN, slave->port_vlan, 0);
		return;
	}

	cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0);
}

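/* Toggle ALE bypass (dual-EMAC mode) or the flood/learn controls (switch
 * mode) so that all traffic reaches the host port while an interface is in
 * promiscuous mode.
 */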
static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_ale *ale = cpsw->ale;
	int i;

	if (cpsw->data.dual_emac) {
		bool flag = false;

		/* Enabling promiscuous mode for one interface is
		 * common for both interfaces, as they share
		 * the same hardware resource.
		 */
		for (i = 0; i < cpsw->data.slaves; i++)
			if (cpsw->slaves[i].ndev->flags & IFF_PROMISC)
				flag = true;

		if (!enable && flag) {
			enable = true;
			dev_err(&ndev->dev, "promiscuity not disabled as the other interface is still in promiscuity mode\n");
		}

		if (enable) {
			/* Enable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 1);

			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Disable Bypass */
			cpsw_ale_control_set(ale, 0, ALE_BYPASS, 0);
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	} else {
		if (enable) {
			unsigned long timeout = jiffies + HZ;

			/* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 1);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 1);
			}

			/* Clear All Untouched entries */
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);
			do {
				cpu_relax();
				if (cpsw_ale_control_get(ale, 0, ALE_AGEOUT))
					break;
			} while (time_after(timeout, jiffies));
			cpsw_ale_control_set(ale, 0, ALE_AGEOUT, 1);

			/* Clear all mcast from ALE */
			cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1);
			__dev_mc_unsync(ndev, NULL);

			/* Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
			dev_dbg(&ndev->dev, "promiscuity enabled\n");
		} else {
			/* Don't Flood All Unicast Packets to Host port */
			cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);

			/* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */
			for (i = 0; i <= cpsw->data.slaves; i++) {
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NOLEARN, 0);
				cpsw_ale_control_set(ale, i,
						     ALE_PORT_NO_SA_UPDATE, 0);
			}
			dev_dbg(&ndev->dev, "promiscuity disabled\n");
		}
	}
}

static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	cpsw_add_mcast(priv, addr);
	return 0;
}

static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int vid, flags;

	if (cpsw->data.dual_emac) {
		vid = cpsw->slaves[priv->emac_port].port_vlan;
		flags = ALE_VLAN;
	} else {
		vid = 0;
		flags = 0;
	}

	cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid);
	return 0;
}

static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	if (ndev->flags & IFF_PROMISC) {
		/* Enable promiscuous mode */
		cpsw_set_promiscious(ndev, true);
		cpsw_ale_set_allmulti(cpsw->ale, IFF_ALLMULTI);
		return;
	} else {
		/* Disable promiscuous mode */
		cpsw_set_promiscious(ndev, false);
	}

	/* Restore allmulti on vlans if necessary */
	cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI);

	__dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr);
}

static void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
	return;
}

static void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
	return;
}

static void cpsw_tx_handler(void *token, int len, int status)
{
	struct netdev_queue	*txq;
	struct sk_buff		*skb = token;
	struct net_device	*ndev = skb->dev;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	/* Check whether the queue is stopped due to stalled tx dma; if the
	 * queue is stopped then restart it, as we now have a free desc for tx
	 */
	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	cpts_tx_timestamp(cpsw->cpts, skb);
	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
	dev_kfree_skb_any(skb);
}

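/* Parse and strip the 4-byte RX VLAN encapsulation word that the switch
 * prepends when CPSW_RX_VLAN_ENCAP is enabled, pushing the recovered
 * priority/VID into the skb as a hardware-accelerated VLAN tag.
 */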
static void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	struct cpsw_common *cpsw = priv->cpsw;
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & Priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;
	/* Ignore default vlans in dual mac mode */
	if (cpsw->data.dual_emac &&
	    vid == cpsw->slaves[priv->emac_port].port_vlan)
		return;

	prio = (rx_vlan_encap_hdr >>
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
		CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

	vtag = (prio << VLAN_PRIO_SHIFT) | vid;
	__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}

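/* CPDMA receive completion callback: hand the skb to the stack and refill
 * the ring with a freshly allocated buffer, or requeue the old one if
 * allocation fails or the interface is down.
 */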
static void cpsw_rx_handler(void *token, int len, int status)
{
	struct cpdma_chan	*ch;
	struct sk_buff		*skb = token;
	struct sk_buff		*new_skb;
	struct net_device	*ndev = skb->dev;
	int			ret = 0, port;
	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);

	if (cpsw->data.dual_emac) {
		port = CPDMA_RX_SOURCE_PORT(status);
		if (port) {
			ndev = cpsw->slaves[--port].ndev;
			skb->dev = ndev;
		}
	}

	if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
		/* In dual emac mode check for all interfaces */
		if (cpsw->data.dual_emac && cpsw->usage_count &&
		    (status >= 0)) {
			/* The packet received is for the interface which
			 * is already down and the other interface is up
			 * and running. Instead of freeing the skb, which
			 * would reduce the number of rx descriptors in the
			 * DMA engine, requeue it back to cpdma.
			 */
			new_skb = skb;
			goto requeue;
		}

		/* the interface is going down, skbs are purged */
		dev_kfree_skb_any(skb);
		return;
	}

	new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
	if (new_skb) {
		skb_copy_queue_mapping(new_skb, skb);
		skb_put(skb, len);
		if (status & CPDMA_RX_VLAN_ENCAP)
			cpsw_rx_vlan_encap(skb);
		cpts_rx_timestamp(cpsw->cpts, skb);
		skb->protocol = eth_type_trans(skb, ndev);
		netif_receive_skb(skb);
		ndev->stats.rx_bytes += len;
		ndev->stats.rx_packets++;
		kmemleak_not_leak(new_skb);
	} else {
		ndev->stats.rx_dropped++;
		new_skb = skb;
	}

requeue:
	if (netif_dormant(ndev)) {
		dev_kfree_skb_any(new_skb);
		return;
	}

	ch = cpsw->rxv[skb_get_queue_mapping(new_skb)].ch;
	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
				skb_tailroom(new_skb), 0);
	if (WARN_ON(ret < 0))
		dev_kfree_skb_any(new_skb);
}

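/* Redistribute the CPSW_POLL_WEIGHT NAPI budget and the CPDMA channel weights
 * across the tx channels according to their configured rates (rate-limited
 * channels get a share proportional to their rate), then split the rx budget
 * evenly between rx channels.
 */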
static void cpsw_split_res(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = CPSW_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link speed,
		 * split proportionally according to the next potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * CPSW_POLL_WEIGHT) / max_rate;
		ch_budget = (CPSW_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = CPSW_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * CPSW_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!bigest_rate_ch)
				bigest_rate_ch = i;
			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = CPSW_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}

static irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);
	writel(0, &cpsw->wr_regs->rx_en);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

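/* NAPI tx poll for the multi-queue case: walk every tx channel that still has
 * completed descriptors, spending at most its pre-computed per-channel budget,
 * and re-enable tx interrupts once all work fits within the overall budget.
 */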
static int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	u32			ch_map;
	int			num_tx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	struct cpsw_vector	*txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

static int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	u32			ch_map;
	int			num_rx, cur_budget, ch;
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	struct cpsw_vector	*rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

static inline void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

static void cpsw_set_slave_mac(struct cpsw_slave *slave,
			       struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

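/* Helpers for the per-port fifo rate shapers configured through the PTYPE
 * register: cpsw_shp_is_off() checks whether all shapers of this slave are
 * disabled, cpsw_fifo_shp_on() turns an individual fifo shaper on or off.
 */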
static bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}

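/* Called via for_each_slave() from the phylib adjust_link callback: translate
 * the PHY state (speed, duplex, pause) into the slave's mac_control value and
 * enable or disable forwarding on the corresponding ALE port.
 */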
static void _cpsw_adjust_link(struct cpsw_slave *slave,
			      struct cpsw_priv *priv, bool *link)
{
	struct phy_device	*phy = slave->phy;
	u32			mac_control = 0;
	u32			slave_port;
	struct cpsw_common *cpsw = priv->cpsw;

	if (!phy)
		return;

	slave_port = cpsw_get_slave_port(slave->slave_num);

	if (phy->link) {
		mac_control = cpsw->data.mac_control;

		/* enable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);

		if (phy->speed == 1000)
			mac_control |= BIT(7);	/* GIGABITEN	*/
		if (phy->duplex)
			mac_control |= BIT(0);	/* FULLDUPLEXEN	*/

		/* set speed_in input in case RMII mode is used in 100Mbps */
		if (phy->speed == 100)
			mac_control |= BIT(15);
		/* in band mode only works in 10Mbps RGMII mode */
		else if ((phy->speed == 10) && phy_interface_is_rgmii(phy))
			mac_control |= BIT(18); /* In Band mode */

		if (priv->rx_pause)
			mac_control |= BIT(3);

		if (priv->tx_pause)
			mac_control |= BIT(4);

		*link = true;

		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed &&
		    !cpsw_shp_is_off(priv))
			dev_warn(priv->dev,
				 "Speed was changed, CBS shaper speeds are changed!");
	} else {
		mac_control = 0;
		/* disable forwarding */
		cpsw_ale_control_set(cpsw->ale, slave_port,
				     ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
	}

	if (mac_control != slave->mac_control) {
		phy_print_status(phy);
		writel_relaxed(mac_control, &slave->sliver->mac_control);
	}

	slave->mac_control = mac_control;
}

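/* Sum the link speeds of all slaves; cpsw_need_resplit() uses the result to
 * decide whether the tx budget/weight split must be recomputed after a link
 * change when rate-limited channels are in use.
 */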
static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

static int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}

static void cpsw_adjust_link(struct net_device *ndev)
{
	struct cpsw_priv	*priv = netdev_priv(ndev);
	struct cpsw_common	*cpsw = priv->cpsw;
	bool			link = false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);

	if (link) {
		if (cpsw_need_resplit(cpsw))
			cpsw_split_res(ndev);

		netif_carrier_on(ndev);
		if (netif_running(ndev))
			netif_tx_wake_all_queues(ndev);
	} else {
		netif_carrier_off(ndev);
		netif_tx_stop_all_queues(ndev);
	}
}

static int cpsw_get_coalesce(struct net_device *ndev,
			     struct ethtool_coalesce *coal)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	coal->rx_coalesce_usecs = cpsw->coal_intvl;
	return 0;
}

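/* ethtool set_coalesce handler: convert the requested rx-usecs interval into
 * the interrupt pacer's prescaler and rx/tx_imax counts (clamped to the
 * CPSW_CMINT* limits), or disable pacing entirely when zero is requested.
 */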
1257static int cpsw_set_coalesce(struct net_device *ndev,
1258 struct ethtool_coalesce *coal)
1259{
1260 struct cpsw_priv *priv = netdev_priv(ndev);
1261 u32 int_ctrl;
1262 u32 num_interrupts = 0;
1263 u32 prescale = 0;
1264 u32 addnl_dvdr = 1;
1265 u32 coal_intvl = 0;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001266 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001267
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001268 coal_intvl = coal->rx_coalesce_usecs;
1269
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001270 int_ctrl = readl(&cpsw->wr_regs->int_control);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001271 prescale = cpsw->bus_freq_mhz * 4;
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001272
Mugunthan V Na84bc2a2014-07-15 20:26:53 +05301273 if (!coal->rx_coalesce_usecs) {
1274 int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
1275 goto update_return;
1276 }
1277
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001278 if (coal_intvl < CPSW_CMINTMIN_INTVL)
1279 coal_intvl = CPSW_CMINTMIN_INTVL;
1280
1281 if (coal_intvl > CPSW_CMINTMAX_INTVL) {
1282 /* The interrupt pacer works with a 4us pulse; we can
1283 * throttle further by dilating the 4us pulse.
1284 */
1285 addnl_dvdr = CPSW_INTPRESCALE_MASK / prescale;
1286
1287 if (addnl_dvdr > 1) {
1288 prescale *= addnl_dvdr;
1289 if (coal_intvl > (CPSW_CMINTMAX_INTVL * addnl_dvdr))
1290 coal_intvl = (CPSW_CMINTMAX_INTVL
1291 * addnl_dvdr);
1292 } else {
1293 addnl_dvdr = 1;
1294 coal_intvl = CPSW_CMINTMAX_INTVL;
1295 }
1296 }
1297
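	/* Convert the requested interval into an interrupts-per-millisecond
	 * count (scaled by the extra divider) and program it into the
	 * rx_imax/tx_imax interrupt pacing registers.
	 */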
1298 num_interrupts = (1000 * addnl_dvdr) / coal_intvl;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001299 writel(num_interrupts, &cpsw->wr_regs->rx_imax);
1300 writel(num_interrupts, &cpsw->wr_regs->tx_imax);
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001301
1302 int_ctrl |= CPSW_INTPACEEN;
1303 int_ctrl &= (~CPSW_INTPRESCALE_MASK);
1304 int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
Mugunthan V Na84bc2a2014-07-15 20:26:53 +05301305
1306update_return:
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001307 writel(int_ctrl, &cpsw->wr_regs->int_control);
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001308
1309 cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001310 cpsw->coal_intvl = coal_intvl;
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001311
1312 return 0;
1313}
1314
Mugunthan V Nd9718542013-07-23 15:38:17 +05301315static int cpsw_get_sset_count(struct net_device *ndev, int sset)
1316{
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001317 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
1318
Mugunthan V Nd9718542013-07-23 15:38:17 +05301319 switch (sset) {
1320 case ETH_SS_STATS:
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001321 return (CPSW_STATS_COMMON_LEN +
1322 (cpsw->rx_ch_num + cpsw->tx_ch_num) *
1323 CPSW_STATS_CH_LEN);
Mugunthan V Nd9718542013-07-23 15:38:17 +05301324 default:
1325 return -EOPNOTSUPP;
1326 }
1327}
1328
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001329static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
1330{
1331 int ch_stats_len;
1332 int line;
1333 int i;
1334
1335 ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
1336 for (i = 0; i < ch_stats_len; i++) {
1337 line = i % CPSW_STATS_CH_LEN;
1338 snprintf(*p, ETH_GSTRING_LEN,
Florian Fainellibf2ce3f2018-05-21 11:45:53 -07001339 "%s DMA chan %ld: %s", rx_dir ? "Rx" : "Tx",
1340 (long)(i / CPSW_STATS_CH_LEN),
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001341 cpsw_gstrings_ch_stats[line].stat_string);
1342 *p += ETH_GSTRING_LEN;
1343 }
1344}
1345
Mugunthan V Nd9718542013-07-23 15:38:17 +05301346static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1347{
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001348 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Mugunthan V Nd9718542013-07-23 15:38:17 +05301349 u8 *p = data;
1350 int i;
1351
1352 switch (stringset) {
1353 case ETH_SS_STATS:
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001354 for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
Mugunthan V Nd9718542013-07-23 15:38:17 +05301355 memcpy(p, cpsw_gstrings_stats[i].stat_string,
1356 ETH_GSTRING_LEN);
1357 p += ETH_GSTRING_LEN;
1358 }
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001359
1360 cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
1361 cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
Mugunthan V Nd9718542013-07-23 15:38:17 +05301362 break;
1363 }
1364}
1365
1366static void cpsw_get_ethtool_stats(struct net_device *ndev,
1367 struct ethtool_stats *stats, u64 *data)
1368{
Mugunthan V Nd9718542013-07-23 15:38:17 +05301369 u8 *p;
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03001370 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001371 struct cpdma_chan_stats ch_stats;
1372 int i, l, ch;
Mugunthan V Nd9718542013-07-23 15:38:17 +05301373
1374 /* Collect CPSW hw stats and Davinci CPDMA stats for Rx and Tx channels */
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001375 for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
1376 data[l] = readl(cpsw->hw_stats +
1377 cpsw_gstrings_stats[l].stat_offset);
Mugunthan V Nd9718542013-07-23 15:38:17 +05301378
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001379 for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02001380 cpdma_chan_get_stats(cpsw->rxv[ch].ch, &ch_stats);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001381 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
1382 p = (u8 *)&ch_stats +
1383 cpsw_gstrings_ch_stats[i].stat_offset;
1384 data[l] = *(u32 *)p;
1385 }
1386 }
Mugunthan V Nd9718542013-07-23 15:38:17 +05301387
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001388 for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02001389 cpdma_chan_get_stats(cpsw->txv[ch].ch, &ch_stats);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001390 for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
1391 p = (u8 *)&ch_stats +
1392 cpsw_gstrings_ch_stats[i].stat_offset;
1393 data[l] = *(u32 *)p;
Mugunthan V Nd9718542013-07-23 15:38:17 +05301394 }
1395 }
1396}
1397
Ivan Khoronzhuk27e9e102016-08-10 02:22:32 +03001398static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001399 struct sk_buff *skb,
1400 struct cpdma_chan *txch)
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001401{
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03001402 struct cpsw_common *cpsw = priv->cpsw;
1403
Ivan Khoronzhuk98fdd852017-06-27 16:58:51 +03001404 skb_tx_timestamp(skb);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001405 return cpdma_chan_submit(txch, skb, skb->data, skb->len,
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001406 priv->emac_port + cpsw->data.dual_emac);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001407}
1408
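/* In dual_emac mode each slave gets its own port VLAN plus ALE entries
 * (VLAN, broadcast mcast and host ucast) restricted to that VLAN, which
 * keeps the traffic of the two EMAC ports separated.
 */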
1409static inline void cpsw_add_dual_emac_def_ale_entries(
1410 struct cpsw_priv *priv, struct cpsw_slave *slave,
1411 u32 slave_port)
1412{
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001413 struct cpsw_common *cpsw = priv->cpsw;
Grygorii Strashko71a2cbb2016-04-07 15:16:44 +03001414 u32 port_mask = 1 << slave_port | ALE_PORT_HOST;
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001415
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001416 if (cpsw->version == CPSW_VERSION_1)
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001417 slave_write(slave, slave->port_vlan, CPSW1_PORT_VLAN);
1418 else
1419 slave_write(slave, slave->port_vlan, CPSW2_PORT_VLAN);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001420 cpsw_ale_add_vlan(cpsw->ale, slave->port_vlan, port_mask,
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001421 port_mask, port_mask, 0);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001422 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03001423 ALE_PORT_HOST, ALE_VLAN, slave->port_vlan, 0);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001424 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
1425 HOST_PORT_NUM, ALE_VLAN |
1426 ALE_SECURE, slave->port_vlan);
Grygorii Strashko5e5add12018-05-01 12:41:22 -05001427 cpsw_ale_control_set(cpsw->ale, slave_port,
1428 ALE_PORT_DROP_UNKNOWN_VLAN, 1);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001429}
1430
Daniel Mack1e7a2e22013-11-15 08:29:16 +01001431static void soft_reset_slave(struct cpsw_slave *slave)
Mugunthan V Ndf828592012-03-18 20:17:54 +00001432{
1433 char name[32];
Daniel Mack1e7a2e22013-11-15 08:29:16 +01001434
1435 snprintf(name, sizeof(name), "slave-%d", slave->slave_num);
1436 soft_reset(name, &slave->sliver->soft_reset);
1437}
1438
1439static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
1440{
Mugunthan V Ndf828592012-03-18 20:17:54 +00001441 u32 slave_port;
Sekhar Nori30c57f02017-04-03 17:34:28 +05301442 struct phy_device *phy;
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03001443 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V Ndf828592012-03-18 20:17:54 +00001444
Daniel Mack1e7a2e22013-11-15 08:29:16 +01001445 soft_reset_slave(slave);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001446
1447 /* setup priority mapping */
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06001448 writel_relaxed(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
Richard Cochran9750a3a2012-10-29 08:45:15 +00001449
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001450 switch (cpsw->version) {
Richard Cochran9750a3a2012-10-29 08:45:15 +00001451 case CPSW_VERSION_1:
1452 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
Grygorii Strashko48f5bcc2017-05-08 14:21:21 -05001453 /* Increase RX FIFO size to 5 for supporting full duplex
1454 * flow control mode
1455 */
1456 slave_write(slave,
1457 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
1458 CPSW_MAX_BLKS_RX, CPSW1_MAX_BLKS);
Richard Cochran9750a3a2012-10-29 08:45:15 +00001459 break;
1460 case CPSW_VERSION_2:
Mugunthan V Nc193f362013-08-05 17:30:05 +05301461 case CPSW_VERSION_3:
Mugunthan V N926489b2013-08-12 17:11:15 +05301462 case CPSW_VERSION_4:
Richard Cochran9750a3a2012-10-29 08:45:15 +00001463 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
Grygorii Strashko48f5bcc2017-05-08 14:21:21 -05001464 /* Increase RX FIFO size to 5 for supporting full duplex
1465 * flow control mode
1466 */
1467 slave_write(slave,
1468 (CPSW_MAX_BLKS_TX << CPSW_MAX_BLKS_TX_SHIFT) |
1469 CPSW_MAX_BLKS_RX, CPSW2_MAX_BLKS);
Richard Cochran9750a3a2012-10-29 08:45:15 +00001470 break;
1471 }
Mugunthan V Ndf828592012-03-18 20:17:54 +00001472
1473 /* setup max packet size, and mac address */
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06001474 writel_relaxed(cpsw->rx_packet_max, &slave->sliver->rx_maxlen);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001475 cpsw_set_slave_mac(slave, priv);
1476
1477 slave->mac_control = 0; /* no link yet */
1478
Ivan Khoronzhuk6f1f5832016-08-10 02:22:34 +03001479 slave_port = cpsw_get_slave_port(slave->slave_num);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001480
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001481 if (cpsw->data.dual_emac)
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001482 cpsw_add_dual_emac_def_ale_entries(priv, slave, slave_port);
1483 else
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001484 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001485 1 << slave_port, 0, 0, ALE_MCAST_FWD_2);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001486
David Rivshind733f7542016-04-27 21:32:31 -04001487 if (slave->data->phy_node) {
Sekhar Nori30c57f02017-04-03 17:34:28 +05301488 phy = of_phy_connect(priv->ndev, slave->data->phy_node,
Heiko Schocher9e42f712015-10-17 06:04:35 +02001489 &cpsw_adjust_link, 0, slave->data->phy_if);
Sekhar Nori30c57f02017-04-03 17:34:28 +05301490 if (!phy) {
Rob Herringf7ce9102017-07-18 16:43:19 -05001491 dev_err(priv->dev, "phy \"%pOF\" not found on slave %d\n",
1492 slave->data->phy_node,
David Rivshind733f7542016-04-27 21:32:31 -04001493 slave->slave_num);
1494 return;
1495 }
1496 } else {
Sekhar Nori30c57f02017-04-03 17:34:28 +05301497 phy = phy_connect(priv->ndev, slave->data->phy_id,
Florian Fainellif9a8f832013-01-14 00:52:52 +00001498 &cpsw_adjust_link, slave->data->phy_if);
Sekhar Nori30c57f02017-04-03 17:34:28 +05301499 if (IS_ERR(phy)) {
David Rivshind733f7542016-04-27 21:32:31 -04001500 dev_err(priv->dev,
1501 "phy \"%s\" not found on slave %d, err %ld\n",
1502 slave->data->phy_id, slave->slave_num,
Sekhar Nori30c57f02017-04-03 17:34:28 +05301503 PTR_ERR(phy));
David Rivshind733f7542016-04-27 21:32:31 -04001504 return;
1505 }
Mugunthan V Ndf828592012-03-18 20:17:54 +00001506 }
David Rivshind733f7542016-04-27 21:32:31 -04001507
Sekhar Nori30c57f02017-04-03 17:34:28 +05301508 slave->phy = phy;
1509
David Rivshind733f7542016-04-27 21:32:31 -04001510 phy_attached_info(slave->phy);
1511
1512 phy_start(slave->phy);
1513
1514 /* Configure GMII_SEL register */
Grygorii Strashko3ff18842018-11-25 18:15:25 -06001515 if (!IS_ERR(slave->data->ifphy))
1516 phy_set_mode_ext(slave->data->ifphy, PHY_MODE_ETHERNET,
1517 slave->data->phy_if);
1518 else
1519 cpsw_phy_sel(cpsw->dev, slave->phy->interface,
1520 slave->slave_num);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001521}
1522
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001523static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
1524{
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001525 struct cpsw_common *cpsw = priv->cpsw;
1526 const int vlan = cpsw->data.default_vlan;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001527 u32 reg;
1528 int i;
Lennart Sorensen1e5c4bc2014-10-31 13:38:52 -04001529 int unreg_mcast_mask;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001530
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001531 reg = (cpsw->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001532 CPSW2_PORT_VLAN;
1533
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001534 writel(vlan, &cpsw->host_port_regs->port_vlan);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001535
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001536 for (i = 0; i < cpsw->data.slaves; i++)
1537 slave_write(cpsw->slaves + i, vlan, reg);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001538
Lennart Sorensen1e5c4bc2014-10-31 13:38:52 -04001539 if (priv->ndev->flags & IFF_ALLMULTI)
1540 unreg_mcast_mask = ALE_ALL_PORTS;
1541 else
1542 unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
1543
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001544 cpsw_ale_add_vlan(cpsw->ale, vlan, ALE_ALL_PORTS,
Grygorii Strashko61f1cef2016-04-07 15:16:43 +03001545 ALE_ALL_PORTS, ALE_ALL_PORTS,
1546 unreg_mcast_mask);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001547}
1548
Mugunthan V Ndf828592012-03-18 20:17:54 +00001549static void cpsw_init_host_port(struct cpsw_priv *priv)
1550{
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001551 u32 fifo_mode;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001552 u32 control_reg;
1553 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001554
Mugunthan V Ndf828592012-03-18 20:17:54 +00001555 /* soft reset the controller and initialize ale */
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001556 soft_reset("cpsw", &cpsw->regs->soft_reset);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001557 cpsw_ale_start(cpsw->ale);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001558
1559 /* switch to vlan unaware mode */
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001560 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM, ALE_VLAN_AWARE,
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001561 CPSW_ALE_VLAN_AWARE);
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001562 control_reg = readl(&cpsw->regs->control);
Grygorii Strashkoa3a41d22018-03-15 15:15:50 -05001563 control_reg |= CPSW_VLAN_AWARE | CPSW_RX_VLAN_ENCAP;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001564 writel(control_reg, &cpsw->regs->control);
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001565 fifo_mode = (cpsw->data.dual_emac) ? CPSW_FIFO_DUAL_MAC_MODE :
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001566 CPSW_FIFO_NORMAL_MODE;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001567 writel(fifo_mode, &cpsw->host_port_regs->tx_in_ctl);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001568
1569 /* setup host port priority mapping */
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06001570 writel_relaxed(CPDMA_TX_PRIORITY_MAP,
1571 &cpsw->host_port_regs->cpdma_tx_pri_map);
1572 writel_relaxed(0, &cpsw->host_port_regs->cpdma_rx_chan_map);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001573
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001574 cpsw_ale_control_set(cpsw->ale, HOST_PORT_NUM,
Mugunthan V Ndf828592012-03-18 20:17:54 +00001575 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1576
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001577 if (!cpsw->data.dual_emac) {
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001578 cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001579 0, 0);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001580 cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
Grygorii Strashko71a2cbb2016-04-07 15:16:44 +03001581 ALE_PORT_HOST, 0, 0, ALE_MCAST_FWD_2);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001582 }
Mugunthan V Ndf828592012-03-18 20:17:54 +00001583}
1584
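/* Pre-fill each RX channel: allocate one skb per free CPDMA descriptor
 * and submit it to the channel so incoming frames have buffers to land in.
 */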
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001585static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
1586{
1587 struct cpsw_common *cpsw = priv->cpsw;
1588 struct sk_buff *skb;
1589 int ch_buf_num;
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001590 int ch, i, ret;
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001591
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001592 for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02001593 ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001594 for (i = 0; i < ch_buf_num; i++) {
1595 skb = __netdev_alloc_skb_ip_align(priv->ndev,
1596 cpsw->rx_packet_max,
1597 GFP_KERNEL);
1598 if (!skb) {
1599 cpsw_err(priv, ifup, "cannot allocate skb\n");
1600 return -ENOMEM;
1601 }
1602
1603 skb_set_queue_mapping(skb, ch);
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02001604 ret = cpdma_chan_submit(cpsw->rxv[ch].ch, skb,
1605 skb->data, skb_tailroom(skb),
1606 0);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001607 if (ret < 0) {
1608 cpsw_err(priv, ifup,
1609 "cannot submit skb to channel %d rx, error %d\n",
1610 ch, ret);
1611 kfree_skb(skb);
1612 return ret;
1613 }
1614 kmemleak_not_leak(skb);
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001615 }
1616
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001617 cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
1618 ch, ch_buf_num);
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001619 }
1620
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001621 return 0;
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001622}
1623
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001624static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
Sebastian Siewioraacebbf2013-04-23 07:31:36 +00001625{
Schuyler Patton3995d262014-03-03 16:19:06 +05301626 u32 slave_port;
1627
Ivan Khoronzhuk6f1f5832016-08-10 02:22:34 +03001628 slave_port = cpsw_get_slave_port(slave->slave_num);
Schuyler Patton3995d262014-03-03 16:19:06 +05301629
Sebastian Siewioraacebbf2013-04-23 07:31:36 +00001630 if (!slave->phy)
1631 return;
1632 phy_stop(slave->phy);
1633 phy_disconnect(slave->phy);
1634 slave->phy = NULL;
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001635 cpsw_ale_control_set(cpsw->ale, slave_port,
Schuyler Patton3995d262014-03-03 16:19:06 +05301636 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
Grygorii Strashko1f95ba02016-06-24 21:23:41 +03001637 soft_reset_slave(slave);
Sebastian Siewioraacebbf2013-04-23 07:31:36 +00001638}
1639
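/* Map a traffic class to a TX FIFO: the last class uses FIFO 0, which
 * cannot be rate limited, while the remaining classes are mapped onto
 * the shaper FIFOs in reverse order.
 */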
Ivan Khoronzhuk7929a662018-07-24 00:26:31 +03001640static int cpsw_tc_to_fifo(int tc, int num_tc)
1641{
1642 if (tc == num_tc - 1)
1643 return 0;
1644
1645 return CPSW_FIFO_SHAPERS_NUM - tc;
1646}
1647
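/* Program the per-FIFO bandwidth: convert the requested bandwidth into a
 * percentage of the configured link speed, write it into the slave's
 * SEND_PERCENT register, and verify the shaped FIFOs stay consistent
 * (highest FIFOs shaped first, total below 100%).
 */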
Ivan Khoronzhuk57d90142018-07-24 00:26:32 +03001648static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
1649{
1650 struct cpsw_common *cpsw = priv->cpsw;
1651 u32 val = 0, send_pct, shift;
1652 struct cpsw_slave *slave;
1653 int pct = 0, i;
1654
1655 if (bw > priv->shp_cfg_speed * 1000)
1656 goto err;
1657
1658 /* shaping has to stay enabled for the highest FIFOs contiguously,
1659 * and a FIFO's bandwidth can be no more than the interface allows
1660 */
1661 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1662 send_pct = slave_read(slave, SEND_PERCENT);
1663 for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
1664 if (!bw) {
1665 if (i >= fifo || !priv->fifo_bw[i])
1666 continue;
1667
1668 dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
1669 continue;
1670 }
1671
1672 if (!priv->fifo_bw[i] && i > fifo) {
1673 dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
1674 return -EINVAL;
1675 }
1676
1677 shift = (i - 1) * 8;
1678 if (i == fifo) {
1679 send_pct &= ~(CPSW_PCT_MASK << shift);
1680 val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
1681 if (!val)
1682 val = 1;
1683
1684 send_pct |= val << shift;
1685 pct += val;
1686 continue;
1687 }
1688
1689 if (priv->fifo_bw[i])
1690 pct += (send_pct >> shift) & CPSW_PCT_MASK;
1691 }
1692
1693 if (pct >= 100)
1694 goto err;
1695
1696 slave_write(slave, send_pct, SEND_PERCENT);
1697 priv->fifo_bw[fifo] = bw;
1698
1699 dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
1700 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));
1701
1702 return 0;
1703err:
1704 dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
1705 return -EINVAL;
1706}
1707
1708static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
1709{
1710 struct cpsw_common *cpsw = priv->cpsw;
1711 struct cpsw_slave *slave;
1712 u32 tx_in_ctl_rg, val;
1713 int ret;
1714
1715 ret = cpsw_set_fifo_bw(priv, fifo, bw);
1716 if (ret)
1717 return ret;
1718
1719 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1720 tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
1721 CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;
1722
1723 if (!bw)
1724 cpsw_fifo_shp_on(priv, fifo, bw);
1725
1726 val = slave_read(slave, tx_in_ctl_rg);
1727 if (cpsw_shp_is_off(priv)) {
1728 /* disable FIFOs rate limited queues */
1729 val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);
1730
1731 /* set type of FIFO queues to normal priority mode */
1732 val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);
1733
1734 /* set type of FIFO queues to be rate limited */
1735 if (bw)
1736 val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
1737 else
1738 priv->shp_cfg_speed = 0;
1739 }
1740
1741 /* toggle a FIFO rate limited queue */
1742 if (bw)
1743 val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1744 else
1745 val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
1746 slave_write(slave, val, tx_in_ctl_rg);
1747
1748 /* FIFO transmit shape enable */
1749 cpsw_fifo_shp_on(priv, fifo, bw);
1750 return 0;
1751}
1752
1753/* Defaults:
1754 * class A - prio 3
1755 * class B - prio 2
1756 * shaping for class A should be set first
1757 */
1758static int cpsw_set_cbs(struct net_device *ndev,
1759 struct tc_cbs_qopt_offload *qopt)
1760{
1761 struct cpsw_priv *priv = netdev_priv(ndev);
1762 struct cpsw_common *cpsw = priv->cpsw;
1763 struct cpsw_slave *slave;
1764 int prev_speed = 0;
1765 int tc, ret, fifo;
1766 u32 bw = 0;
1767
1768 tc = netdev_txq_to_tc(priv->ndev, qopt->queue);
1769
1770 /* enable channels in backward order, as highest FIFOs must be rate
1771 * limited first and for compliance with CPDMA rate limited channels
1772 * which are also used in backward order. FIFO0 cannot be rate limited.
1773 */
1774 fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
1775 if (!fifo) {
1776 dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
1777 return -EINVAL;
1778 }
1779
1780 /* do nothing, it's disabled anyway */
1781 if (!qopt->enable && !priv->fifo_bw[fifo])
1782 return 0;
1783
1784 /* shapers can be set if link speed is known */
1785 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
1786 if (slave->phy && slave->phy->link) {
1787 if (priv->shp_cfg_speed &&
1788 priv->shp_cfg_speed != slave->phy->speed)
1789 prev_speed = priv->shp_cfg_speed;
1790
1791 priv->shp_cfg_speed = slave->phy->speed;
1792 }
1793
1794 if (!priv->shp_cfg_speed) {
1795 dev_err(priv->dev, "Link speed is not known");
1796 return -1;
1797 }
1798
1799 ret = pm_runtime_get_sync(cpsw->dev);
1800 if (ret < 0) {
1801 pm_runtime_put_noidle(cpsw->dev);
1802 return ret;
1803 }
1804
1805 bw = qopt->enable ? qopt->idleslope : 0;
1806 ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
1807 if (ret) {
1808 priv->shp_cfg_speed = prev_speed;
1809 prev_speed = 0;
1810 }
1811
1812 if (bw && prev_speed)
1813 dev_warn(priv->dev,
1814 "Speed was changed, CBS shaper speeds are changed!");
1815
1816 pm_runtime_put_sync(cpsw->dev);
1817 return ret;
1818}
1819
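/* Re-apply the saved per-FIFO CBS bandwidths after a slave port reset,
 * walking the FIFOs from highest to lowest as required by the shaper
 * configuration order.
 */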
Ivan Khoronzhuk4b4255e2018-07-24 00:26:33 +03001820static void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1821{
1822 int fifo, bw;
1823
1824 for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
1825 bw = priv->fifo_bw[fifo];
1826 if (!bw)
1827 continue;
1828
1829 cpsw_set_fifo_rlimit(priv, fifo, bw);
1830 }
1831}
1832
1833static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
1834{
1835 struct cpsw_common *cpsw = priv->cpsw;
1836 u32 tx_prio_map = 0;
1837 int i, tc, fifo;
1838 u32 tx_prio_rg;
1839
1840 if (!priv->mqprio_hw)
1841 return;
1842
1843 for (i = 0; i < 8; i++) {
1844 tc = netdev_get_prio_tc_map(priv->ndev, i);
1845 fifo = CPSW_FIFO_SHAPERS_NUM - tc;
1846 tx_prio_map |= fifo << (4 * i);
1847 }
1848
1849 tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
1850 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;
1851
1852 slave_write(slave, tx_prio_map, tx_prio_rg);
1853}
1854
1855/* restore resources after port reset */
1856static void cpsw_restore(struct cpsw_priv *priv)
1857{
1858 /* restore MQPRIO offload */
1859 for_each_slave(priv, cpsw_mqprio_resume, priv);
1860
1861 /* restore CBS offload */
1862 for_each_slave(priv, cpsw_cbs_resume, priv);
1863}
1864
Mugunthan V Ndf828592012-03-18 20:17:54 +00001865static int cpsw_ndo_open(struct net_device *ndev)
1866{
1867 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03001868 struct cpsw_common *cpsw = priv->cpsw;
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001869 int ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00001870 u32 reg;
1871
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03001872 ret = pm_runtime_get_sync(cpsw->dev);
Grygorii Strashko108a6532016-06-24 21:23:42 +03001873 if (ret < 0) {
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03001874 pm_runtime_put_noidle(cpsw->dev);
Grygorii Strashko108a6532016-06-24 21:23:42 +03001875 return ret;
1876 }
Grygorii Strashko3fa88c52016-04-19 21:09:49 +03001877
Mugunthan V Ndf828592012-03-18 20:17:54 +00001878 netif_carrier_off(ndev);
1879
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001880 /* Notify the stack of the actual queue counts. */
1881 ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
1882 if (ret) {
1883 dev_err(priv->dev, "cannot set real number of tx queues\n");
1884 goto err_cleanup;
1885 }
1886
1887 ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
1888 if (ret) {
1889 dev_err(priv->dev, "cannot set real number of rx queues\n");
1890 goto err_cleanup;
1891 }
1892
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001893 reg = cpsw->version;
Mugunthan V Ndf828592012-03-18 20:17:54 +00001894
1895 dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
1896 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
1897 CPSW_RTL_VERSION(reg));
1898
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02001899 /* Initialize host and slave ports */
1900 if (!cpsw->usage_count)
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001901 cpsw_init_host_port(priv);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001902 for_each_slave(priv, cpsw_slave_open, priv);
1903
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001904 /* Add default VLAN */
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03001905 if (!cpsw->data.dual_emac)
Mugunthan V Ne6afea02014-06-18 17:21:48 +05301906 cpsw_add_default_vlan(priv);
1907 else
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001908 cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
Grygorii Strashko61f1cef2016-04-07 15:16:43 +03001909 ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00001910
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02001911 /* initialize shared resources for every ndev */
1912 if (!cpsw->usage_count) {
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001913 /* disable priority elevation */
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06001914 writel_relaxed(0, &cpsw->regs->ptype);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001915
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001916 /* enable statistics collection on all ports */
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06001917 writel_relaxed(0x7, &cpsw->regs->stat_port_en);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001918
Mugunthan V N1923d6e2014-09-08 22:54:02 +05301919 /* Enable internal fifo flow control */
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03001920 writel(0x7, &cpsw->regs->flow_control);
Mugunthan V N1923d6e2014-09-08 22:54:02 +05301921
Ivan Khoronzhukdbc4ec52016-08-10 02:22:43 +03001922 napi_enable(&cpsw->napi_rx);
1923 napi_enable(&cpsw->napi_tx);
Mugunthan V Nd354eb82015-08-04 16:06:19 +05301924
Ivan Khoronzhuke38b5a32016-08-10 02:22:41 +03001925 if (cpsw->tx_irq_disabled) {
1926 cpsw->tx_irq_disabled = false;
1927 enable_irq(cpsw->irqs_table[1]);
Mugunthan V N7da11602015-08-12 15:22:53 +05301928 }
1929
Ivan Khoronzhuke38b5a32016-08-10 02:22:41 +03001930 if (cpsw->rx_irq_disabled) {
1931 cpsw->rx_irq_disabled = false;
1932 enable_irq(cpsw->irqs_table[0]);
Mugunthan V N7da11602015-08-12 15:22:53 +05301933 }
1934
Ivan Khoronzhuk3802dce12016-08-22 21:18:24 +03001935 ret = cpsw_fill_rx_channels(priv);
1936 if (ret < 0)
1937 goto err_cleanup;
Mugunthan V Nf280e892013-12-11 22:09:05 -06001938
Grygorii Strashko8a2c9a52016-12-06 18:00:41 -06001939 if (cpts_register(cpsw->cpts))
Mugunthan V Nf280e892013-12-11 22:09:05 -06001940 dev_err(priv->dev, "error registering cpts device\n");
1941
Mugunthan V Ndf828592012-03-18 20:17:54 +00001942 }
Mugunthan V Ndf828592012-03-18 20:17:54 +00001943
Ivan Khoronzhuk4b4255e2018-07-24 00:26:33 +03001944 cpsw_restore(priv);
1945
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001946 /* Enable Interrupt pacing if configured */
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001947 if (cpsw->coal_intvl != 0) {
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001948 struct ethtool_coalesce coal;
1949
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001950 coal.rx_coalesce_usecs = cpsw->coal_intvl;
Mugunthan V Nff5b8ef2013-03-11 23:16:37 +00001951 cpsw_set_coalesce(ndev, &coal);
1952 }
1953
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03001954 cpdma_ctlr_start(cpsw->dma);
1955 cpsw_intr_enable(cpsw);
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02001956 cpsw->usage_count++;
Mugunthan V Nf63a9752014-04-10 14:23:24 +05301957
Mugunthan V Ndf828592012-03-18 20:17:54 +00001958 return 0;
Mugunthan V Ndf828592012-03-18 20:17:54 +00001959
Sebastian Siewioraacebbf2013-04-23 07:31:36 +00001960err_cleanup:
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03001961 cpdma_ctlr_stop(cpsw->dma);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001962 for_each_slave(priv, cpsw_slave_stop, cpsw);
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03001963 pm_runtime_put_sync(cpsw->dev);
Sebastian Siewioraacebbf2013-04-23 07:31:36 +00001964 netif_carrier_off(priv->ndev);
1965 return ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00001966}
1967
1968static int cpsw_ndo_stop(struct net_device *ndev)
1969{
1970 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03001971 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V Ndf828592012-03-18 20:17:54 +00001972
1973 cpsw_info(priv, ifdown, "shutting down cpsw device\n");
Ivan Khoronzhuk5da19482018-10-12 18:28:15 +03001974 __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03001975 netif_tx_stop_all_queues(priv->ndev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001976 netif_carrier_off(priv->ndev);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001977
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02001978 if (cpsw->usage_count <= 1) {
Ivan Khoronzhukdbc4ec52016-08-10 02:22:43 +03001979 napi_disable(&cpsw->napi_rx);
1980 napi_disable(&cpsw->napi_tx);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001981 cpts_unregister(cpsw->cpts);
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03001982 cpsw_intr_disable(cpsw);
1983 cpdma_ctlr_stop(cpsw->dma);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001984 cpsw_ale_stop(cpsw->ale);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00001985 }
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03001986 for_each_slave(priv, cpsw_slave_stop, cpsw);
Ivan Khoronzhuk0be01b82016-12-10 14:23:49 +02001987
1988 if (cpsw_need_resplit(cpsw))
1989 cpsw_split_res(ndev);
1990
Ivan Khoronzhukd5bc1612017-02-14 16:02:36 +02001991 cpsw->usage_count--;
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03001992 pm_runtime_put_sync(cpsw->dev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00001993 return 0;
1994}
1995
1996static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
1997 struct net_device *ndev)
1998{
1999 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03002000 struct cpsw_common *cpsw = priv->cpsw;
Ivan Khoronzhukf44f8412017-06-27 16:58:52 +03002001 struct cpts *cpts = cpsw->cpts;
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002002 struct netdev_queue *txq;
2003 struct cpdma_chan *txch;
2004 int ret, q_idx;
Mugunthan V Ndf828592012-03-18 20:17:54 +00002005
Mugunthan V Ndf828592012-03-18 20:17:54 +00002006 if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
2007 cpsw_err(priv, tx_err, "packet pad failed\n");
Tobias Klauser8dc43dd2014-03-10 13:12:23 +01002008 ndev->stats.tx_dropped++;
Ivan Khoronzhuk1bf96052017-02-11 03:49:57 +02002009 return NET_XMIT_DROP;
Mugunthan V Ndf828592012-03-18 20:17:54 +00002010 }
2011
Mugunthan V N9232b162013-02-11 09:52:19 +00002012 if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
Ivan Khoronzhukf44f8412017-06-27 16:58:52 +03002013 cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb))
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002014 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2015
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002016 q_idx = skb_get_queue_mapping(skb);
2017 if (q_idx >= cpsw->tx_ch_num)
2018 q_idx = q_idx % cpsw->tx_ch_num;
2019
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02002020 txch = cpsw->txv[q_idx].ch;
Grygorii Strashko62f94c22018-02-06 19:17:06 -06002021 txq = netdev_get_tx_queue(ndev, q_idx);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002022 ret = cpsw_tx_packet_submit(priv, skb, txch);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002023 if (unlikely(ret != 0)) {
2024 cpsw_err(priv, tx_err, "desc submit failed\n");
2025 goto fail;
2026 }
2027
Mugunthan V Nfae50822013-01-17 06:31:34 +00002028 /* If there are no more free tx descriptors left, we need to
2029 * tell the kernel to stop sending us tx frames.
2030 */
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002031 if (unlikely(!cpdma_check_free_tx_desc(txch))) {
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002032 netif_tx_stop_queue(txq);
Grygorii Strashko62f94c22018-02-06 19:17:06 -06002033
2034 /* Barrier, so that stop_queue visible to other cpus */
2035 smp_mb__after_atomic();
2036
2037 if (cpdma_check_free_tx_desc(txch))
2038 netif_tx_wake_queue(txq);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002039 }
Mugunthan V Nfae50822013-01-17 06:31:34 +00002040
Mugunthan V Ndf828592012-03-18 20:17:54 +00002041 return NETDEV_TX_OK;
2042fail:
Tobias Klauser8dc43dd2014-03-10 13:12:23 +01002043 ndev->stats.tx_dropped++;
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002044 netif_tx_stop_queue(txq);
Grygorii Strashko62f94c22018-02-06 19:17:06 -06002045
2046 /* Barrier, so that stop_queue visible to other cpus */
2047 smp_mb__after_atomic();
2048
2049 if (cpdma_check_free_tx_desc(txch))
2050 netif_tx_wake_queue(txq);
2051
Mugunthan V Ndf828592012-03-18 20:17:54 +00002052 return NETDEV_TX_BUSY;
2053}
2054
Grygorii Strashkoc8395d42016-12-06 18:00:34 -06002055#if IS_ENABLED(CONFIG_TI_CPTS)
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002056
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002057static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw)
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002058{
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002059 struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave];
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002060 u32 ts_en, seq_id;
2061
Grygorii Strashkob63ba582016-12-06 18:00:35 -06002062 if (!cpts_is_tx_enabled(cpsw->cpts) &&
2063 !cpts_is_rx_enabled(cpsw->cpts)) {
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002064 slave_write(slave, 0, CPSW1_TS_CTL);
2065 return;
2066 }
2067
2068 seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
2069 ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;
2070
Grygorii Strashkob63ba582016-12-06 18:00:35 -06002071 if (cpts_is_tx_enabled(cpsw->cpts))
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002072 ts_en |= CPSW_V1_TS_TX_EN;
2073
Grygorii Strashkob63ba582016-12-06 18:00:35 -06002074 if (cpts_is_rx_enabled(cpsw->cpts))
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002075 ts_en |= CPSW_V1_TS_RX_EN;
2076
2077 slave_write(slave, ts_en, CPSW1_TS_CTL);
2078 slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
2079}
2080
2081static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
2082{
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00002083 struct cpsw_slave *slave;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03002084 struct cpsw_common *cpsw = priv->cpsw;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002085 u32 ctrl, mtype;
2086
Ivan Khoronzhukcb7d78d02016-12-10 14:23:46 +02002087 slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00002088
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002089 ctrl = slave_read(slave, CPSW2_CONTROL);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002090 switch (cpsw->version) {
George Cherian09c55372014-05-02 12:02:02 +05302091 case CPSW_VERSION_2:
2092 ctrl &= ~CTRL_V2_ALL_TS_MASK;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002093
Grygorii Strashkob63ba582016-12-06 18:00:35 -06002094 if (cpts_is_tx_enabled(cpsw->cpts))
George Cherian09c55372014-05-02 12:02:02 +05302095 ctrl |= CTRL_V2_TX_TS_BITS;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002096
Grygorii Strashkob63ba582016-12-06 18:00:35 -06002097 if (cpts_is_rx_enabled(cpsw->cpts))
George Cherian09c55372014-05-02 12:02:02 +05302098 ctrl |= CTRL_V2_RX_TS_BITS;
Richard Cochran26fe7eb2015-05-25 11:02:13 +02002099 break;
George Cherian09c55372014-05-02 12:02:02 +05302100 case CPSW_VERSION_3:
2101 default:
2102 ctrl &= ~CTRL_V3_ALL_TS_MASK;
2103
Grygorii Strashkob63ba582016-12-06 18:00:35 -06002104 if (cpts_is_tx_enabled(cpsw->cpts))
George Cherian09c55372014-05-02 12:02:02 +05302105 ctrl |= CTRL_V3_TX_TS_BITS;
2106
Grygorii Strashkob63ba582016-12-06 18:00:35 -06002107 if (cpts_is_rx_enabled(cpsw->cpts))
George Cherian09c55372014-05-02 12:02:02 +05302108 ctrl |= CTRL_V3_RX_TS_BITS;
Richard Cochran26fe7eb2015-05-25 11:02:13 +02002109 break;
George Cherian09c55372014-05-02 12:02:02 +05302110 }
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002111
2112 mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
2113
2114 slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
2115 slave_write(slave, ctrl, CPSW2_CONTROL);
Grygorii Strashkodda5f5fe2017-11-30 18:21:11 -06002116 writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002117}
2118
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002119static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002120{
Mugunthan V N3177bf62012-11-27 07:53:40 +00002121 struct cpsw_priv *priv = netdev_priv(dev);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002122 struct hwtstamp_config cfg;
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002123 struct cpsw_common *cpsw = priv->cpsw;
2124 struct cpts *cpts = cpsw->cpts;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002125
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002126 if (cpsw->version != CPSW_VERSION_1 &&
2127 cpsw->version != CPSW_VERSION_2 &&
2128 cpsw->version != CPSW_VERSION_3)
Ben Hutchings2ee91e52013-11-14 00:47:36 +00002129 return -EOPNOTSUPP;
2130
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002131 if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
2132 return -EFAULT;
2133
2134 /* reserved for future extensions */
2135 if (cfg.flags)
2136 return -EINVAL;
2137
Ben Hutchings2ee91e52013-11-14 00:47:36 +00002138 if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002139 return -ERANGE;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002140
2141 switch (cfg.rx_filter) {
2142 case HWTSTAMP_FILTER_NONE:
Grygorii Strashkob63ba582016-12-06 18:00:35 -06002143 cpts_rx_enable(cpts, 0);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002144 break;
2145 case HWTSTAMP_FILTER_ALL:
Grygorii Strashkoe9523a52017-06-08 13:51:31 -05002146 case HWTSTAMP_FILTER_NTP_ALL:
2147 return -ERANGE;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002148 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2149 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2150 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
Grygorii Strashkoe9523a52017-06-08 13:51:31 -05002151 cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT);
2152 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
2153 break;
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002154 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2155 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2156 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2157 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2158 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2159 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2160 case HWTSTAMP_FILTER_PTP_V2_EVENT:
2161 case HWTSTAMP_FILTER_PTP_V2_SYNC:
2162 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
Grygorii Strashkoe9523a52017-06-08 13:51:31 -05002163 cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002164 cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
2165 break;
2166 default:
2167 return -ERANGE;
2168 }
2169
Grygorii Strashkob63ba582016-12-06 18:00:35 -06002170 cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON);
Ben Hutchings2ee91e52013-11-14 00:47:36 +00002171
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002172 switch (cpsw->version) {
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002173 case CPSW_VERSION_1:
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002174 cpsw_hwtstamp_v1(cpsw);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002175 break;
2176 case CPSW_VERSION_2:
George Cherianf7d403c2014-05-02 12:02:01 +05302177 case CPSW_VERSION_3:
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002178 cpsw_hwtstamp_v2(priv);
2179 break;
2180 default:
Ben Hutchings2ee91e52013-11-14 00:47:36 +00002181 WARN_ON(1);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002182 }
2183
2184 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2185}
2186
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002187static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2188{
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002189 struct cpsw_common *cpsw = ndev_to_cpsw(dev);
2190 struct cpts *cpts = cpsw->cpts;
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002191 struct hwtstamp_config cfg;
2192
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002193 if (cpsw->version != CPSW_VERSION_1 &&
2194 cpsw->version != CPSW_VERSION_2 &&
2195 cpsw->version != CPSW_VERSION_3)
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002196 return -EOPNOTSUPP;
2197
2198 cfg.flags = 0;
Grygorii Strashkob63ba582016-12-06 18:00:35 -06002199 cfg.tx_type = cpts_is_tx_enabled(cpts) ?
2200 HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2201 cfg.rx_filter = (cpts_is_rx_enabled(cpts) ?
Grygorii Strashkoe9523a52017-06-08 13:51:31 -05002202 cpts->rx_enable : HWTSTAMP_FILTER_NONE);
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002203
2204 return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
2205}
Grygorii Strashkoc8395d42016-12-06 18:00:34 -06002206#else
2207static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
2208{
2209 return -EOPNOTSUPP;
2210}
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002211
Grygorii Strashkoc8395d42016-12-06 18:00:34 -06002212static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
2213{
2214 return -EOPNOTSUPP;
2215}
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002216#endif /*CONFIG_TI_CPTS*/
2217
2218static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2219{
Mugunthan V N11f2c982013-03-11 23:16:38 +00002220 struct cpsw_priv *priv = netdev_priv(dev);
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002221 struct cpsw_common *cpsw = priv->cpsw;
2222 int slave_no = cpsw_slave_index(cpsw, priv);
Mugunthan V N11f2c982013-03-11 23:16:38 +00002223
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002224 if (!netif_running(dev))
2225 return -EINVAL;
2226
Mugunthan V N11f2c982013-03-11 23:16:38 +00002227 switch (cmd) {
Mugunthan V N11f2c982013-03-11 23:16:38 +00002228 case SIOCSHWTSTAMP:
Ben Hutchingsa5b41452013-11-18 23:23:40 +00002229 return cpsw_hwtstamp_set(dev, req);
2230 case SIOCGHWTSTAMP:
2231 return cpsw_hwtstamp_get(dev, req);
Mugunthan V N11f2c982013-03-11 23:16:38 +00002232 }
2233
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002234 if (!cpsw->slaves[slave_no].phy)
Stefan Sørensenc1b59942014-02-16 14:54:25 +01002235 return -EOPNOTSUPP;
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002236 return phy_mii_ioctl(cpsw->slaves[slave_no].phy, req, cmd);
Richard Cochran2e5b38a2012-10-29 08:45:20 +00002237}
2238
Mugunthan V Ndf828592012-03-18 20:17:54 +00002239static void cpsw_ndo_tx_timeout(struct net_device *ndev)
2240{
2241 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03002242 struct cpsw_common *cpsw = priv->cpsw;
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002243 int ch;
Mugunthan V Ndf828592012-03-18 20:17:54 +00002244
2245 cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
Tobias Klauser8dc43dd2014-03-10 13:12:23 +01002246 ndev->stats.tx_errors++;
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03002247 cpsw_intr_disable(cpsw);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002248 for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02002249 cpdma_chan_stop(cpsw->txv[ch].ch);
2250 cpdma_chan_start(cpsw->txv[ch].ch);
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03002251 }
2252
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03002253 cpsw_intr_enable(cpsw);
Grygorii Strashko75514b62017-03-31 18:41:23 -05002254 netif_trans_update(ndev);
2255 netif_tx_wake_all_queues(ndev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002256}
2257
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302258static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
2259{
2260 struct cpsw_priv *priv = netdev_priv(ndev);
2261 struct sockaddr *addr = (struct sockaddr *)p;
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03002262 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302263 int flags = 0;
2264 u16 vid = 0;
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002265 int ret;
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302266
2267 if (!is_valid_ether_addr(addr->sa_data))
2268 return -EADDRNOTAVAIL;
2269
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002270 ret = pm_runtime_get_sync(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002271 if (ret < 0) {
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002272 pm_runtime_put_noidle(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002273 return ret;
2274 }
2275
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002276 if (cpsw->data.dual_emac) {
2277 vid = cpsw->slaves[priv->emac_port].port_vlan;
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302278 flags = ALE_VLAN;
2279 }
2280
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002281 cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr, HOST_PORT_NUM,
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302282 flags, vid);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002283 cpsw_ale_add_ucast(cpsw->ale, addr->sa_data, HOST_PORT_NUM,
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302284 flags, vid);
2285
2286 memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN);
2287 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
2288 for_each_slave(priv, cpsw_set_slave_mac, priv);
2289
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002290 pm_runtime_put(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002291
Mugunthan V Ndcfd8d52013-07-25 23:44:01 +05302292 return 0;
2293}
2294
Mugunthan V Ndf828592012-03-18 20:17:54 +00002295#ifdef CONFIG_NET_POLL_CONTROLLER
2296static void cpsw_ndo_poll_controller(struct net_device *ndev)
2297{
Ivan Khoronzhukdbc4ec52016-08-10 02:22:43 +03002298 struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002299
Ivan Khoronzhukdbc4ec52016-08-10 02:22:43 +03002300 cpsw_intr_disable(cpsw);
2301 cpsw_rx_interrupt(cpsw->irqs_table[0], cpsw);
2302 cpsw_tx_interrupt(cpsw->irqs_table[1], cpsw);
2303 cpsw_intr_enable(cpsw);
Mugunthan V Ndf828592012-03-18 20:17:54 +00002304}
2305#endif
2306
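/* Install the ALE entries for a new VLAN id: the VLAN itself, a unicast
 * entry for the port MAC address on the host port, and a broadcast mcast
 * entry, with the member/mcast port masks depending on dual_emac mode.
 */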
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002307static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
2308 unsigned short vid)
2309{
2310 int ret;
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302311 int unreg_mcast_mask = 0;
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03002312 int mcast_mask;
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302313 u32 port_mask;
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002314 struct cpsw_common *cpsw = priv->cpsw;
Lennart Sorensen1e5c4bc2014-10-31 13:38:52 -04002315
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002316 if (cpsw->data.dual_emac) {
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302317 port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002318
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03002319 mcast_mask = ALE_PORT_HOST;
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302320 if (priv->ndev->flags & IFF_ALLMULTI)
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03002321 unreg_mcast_mask = mcast_mask;
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302322 } else {
2323 port_mask = ALE_ALL_PORTS;
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03002324 mcast_mask = port_mask;
Mugunthan V N9f6bd8f2015-01-15 14:59:28 +05302325
2326 if (priv->ndev->flags & IFF_ALLMULTI)
2327 unreg_mcast_mask = ALE_ALL_PORTS;
2328 else
2329 unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
2330 }
2331
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002332 ret = cpsw_ale_add_vlan(cpsw->ale, vid, port_mask, 0, port_mask,
Grygorii Strashko61f1cef2016-04-07 15:16:43 +03002333 unreg_mcast_mask);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002334 if (ret != 0)
2335 return ret;
2336
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002337 ret = cpsw_ale_add_ucast(cpsw->ale, priv->mac_addr,
Grygorii Strashko71a2cbb2016-04-07 15:16:44 +03002338 HOST_PORT_NUM, ALE_VLAN, vid);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002339 if (ret != 0)
2340 goto clean_vid;
2341
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002342 ret = cpsw_ale_add_mcast(cpsw->ale, priv->ndev->broadcast,
Ivan Khoronzhuk5b3a5a12018-10-12 19:06:29 +03002343 mcast_mask, ALE_VLAN, vid, 0);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002344 if (ret != 0)
2345 goto clean_vlan_ucast;
2346 return 0;
2347
2348clean_vlan_ucast:
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002349 cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
Grygorii Strashko71a2cbb2016-04-07 15:16:44 +03002350 HOST_PORT_NUM, ALE_VLAN, vid);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002351clean_vid:
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002352 cpsw_ale_del_vlan(cpsw->ale, vid, 0);
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002353 return ret;
2354}
2355
2356static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
Patrick McHardy80d5c362013-04-19 02:04:28 +00002357 __be16 proto, u16 vid)
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002358{
2359 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03002360 struct cpsw_common *cpsw = priv->cpsw;
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002361 int ret;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002362
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002363 if (vid == cpsw->data.default_vlan)
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002364 return 0;
2365
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002366 ret = pm_runtime_get_sync(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002367 if (ret < 0) {
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002368 pm_runtime_put_noidle(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002369 return ret;
2370 }
2371
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002372 if (cpsw->data.dual_emac) {
Mugunthan V N02a54162015-01-22 15:19:22 +05302373 /* In dual EMAC, reserved VLAN id should not be used for
2374 * creating VLAN interfaces as this can break the dual
2375 * EMAC port separation
2376 */
2377 int i;
2378
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002379 for (i = 0; i < cpsw->data.slaves; i++) {
Ivan Khoronzhuk803c4f62018-08-10 15:47:09 +03002380 if (vid == cpsw->slaves[i].port_vlan) {
2381 ret = -EINVAL;
2382 goto err;
2383 }
Mugunthan V N02a54162015-01-22 15:19:22 +05302384 }
2385 }
2386
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002387 dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002388 ret = cpsw_add_vlan_ale_entry(priv, vid);
Ivan Khoronzhuk803c4f62018-08-10 15:47:09 +03002389err:
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002390 pm_runtime_put(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002391 return ret;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002392}
2393
2394static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
Patrick McHardy80d5c362013-04-19 02:04:28 +00002395 __be16 proto, u16 vid)
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002396{
2397 struct cpsw_priv *priv = netdev_priv(ndev);
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03002398 struct cpsw_common *cpsw = priv->cpsw;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002399 int ret;
2400
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002401 if (vid == cpsw->data.default_vlan)
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002402 return 0;
2403
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002404 ret = pm_runtime_get_sync(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002405 if (ret < 0) {
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002406 pm_runtime_put_noidle(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002407 return ret;
2408 }
2409
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002410 if (cpsw->data.dual_emac) {
Mugunthan V N02a54162015-01-22 15:19:22 +05302411 int i;
2412
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03002413 for (i = 0; i < cpsw->data.slaves; i++) {
2414 if (vid == cpsw->slaves[i].port_vlan)
Ivan Khoronzhuk803c4f62018-08-10 15:47:09 +03002415 goto err;
Mugunthan V N02a54162015-01-22 15:19:22 +05302416 }
2417 }
2418
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002419 dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03002420 ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
Ivan Khoronzhukbe35b982018-08-10 15:47:08 +03002421 ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
2422 HOST_PORT_NUM, ALE_VLAN, vid);
2423 ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
2424 0, ALE_VLAN, vid);
Ivan Khoronzhuk803c4f62018-08-10 15:47:09 +03002425err:
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03002426 pm_runtime_put(cpsw->dev);
Grygorii Strashkoa6c5d142016-06-24 21:23:45 +03002427 return ret;
Mugunthan V N3b72c2f2013-02-05 08:26:48 +00002428}
2429
static int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if ((ch_rate < min_rate && ch_rate)) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	cpsw_split_res(ndev);
	return ret;
}

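/* MQPRIO offload: translate the eight priority-to-TC entries into a per-fifo
 * TX priority map, update the netdev TC/queue layout (or reset it when the
 * offload is disabled) and write the map to the slave's TX_PRI_MAP register.
 */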
static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(cpsw->dev);
		return ret;
	}

	if (num_tc) {
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	priv->mqprio_hw = mqprio->qopt.hw;

	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}

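/* ndo_setup_tc: only the CBS and MQPRIO qdisc offloads are handled here,
 * anything else is rejected with -EOPNOTSUPP.
 */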
static int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			     void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return cpsw_set_cbs(ndev, type_data);

	case TC_SETUP_QDISC_MQPRIO:
		return cpsw_set_mqprio(ndev, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

static const struct net_device_ops cpsw_netdev_ops = {
	.ndo_open = cpsw_ndo_open,
	.ndo_stop = cpsw_ndo_stop,
	.ndo_start_xmit = cpsw_ndo_start_xmit,
	.ndo_set_mac_address = cpsw_ndo_set_mac_address,
	.ndo_do_ioctl = cpsw_ndo_ioctl,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_tx_timeout = cpsw_ndo_tx_timeout,
	.ndo_set_rx_mode = cpsw_ndo_set_rx_mode,
	.ndo_set_tx_maxrate = cpsw_ndo_set_tx_maxrate,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = cpsw_ndo_poll_controller,
#endif
	.ndo_vlan_rx_add_vid = cpsw_ndo_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = cpsw_ndo_vlan_rx_kill_vid,
	.ndo_setup_tc = cpsw_ndo_setup_tc,
};

static int cpsw_get_regs_len(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	return cpsw->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
}

static void cpsw_get_regs(struct net_device *ndev,
			  struct ethtool_regs *regs, void *p)
{
	u32 *reg = p;
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* update CPSW IP version */
	regs->version = cpsw->version;

	cpsw_ale_dump(cpsw->ale, reg);
}

static void cpsw_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct platform_device *pdev = to_platform_device(cpsw->dev);

	strlcpy(info->driver, "cpsw", sizeof(info->driver));
	strlcpy(info->version, "1.0", sizeof(info->version));
	strlcpy(info->bus_info, pdev->name, sizeof(info->bus_info));
}

static u32 cpsw_get_msglevel(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	return priv->msg_enable;
}

static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	priv->msg_enable = value;
}

#if IS_ENABLED(CONFIG_TI_CPTS)
static int cpsw_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;
	info->phc_index = cpsw->cpts->phc_index;
	info->tx_types =
		(1 << HWTSTAMP_TX_OFF) |
		(1 << HWTSTAMP_TX_ON);
	info->rx_filters =
		(1 << HWTSTAMP_FILTER_NONE) |
		(1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
		(1 << HWTSTAMP_FILTER_PTP_V2_EVENT);
	return 0;
}
#else
static int cpsw_get_ts_info(struct net_device *ndev,
			    struct ethtool_ts_info *info)
{
	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE;
	info->phc_index = -1;
	info->tx_types = 0;
	info->rx_filters = 0;
	return 0;
}
#endif

static int cpsw_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (!cpsw->slaves[slave_no].phy)
		return -EOPNOTSUPP;

	phy_ethtool_ksettings_get(cpsw->slaves[slave_no].phy, ecmd);
	return 0;
}

static int cpsw_set_link_ksettings(struct net_device *ndev,
				   const struct ethtool_link_ksettings *ecmd)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_ksettings_set(cpsw->slaves[slave_no].phy,
						 ecmd);
	else
		return -EOPNOTSUPP;
}

static void cpsw_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	wol->supported = 0;
	wol->wolopts = 0;

	if (cpsw->slaves[slave_no].phy)
		phy_ethtool_get_wol(cpsw->slaves[slave_no].phy, wol);
}

static int cpsw_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_wol(cpsw->slaves[slave_no].phy, wol);
	else
		return -EOPNOTSUPP;
}

static void cpsw_get_pauseparam(struct net_device *ndev,
				struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	pause->autoneg = AUTONEG_DISABLE;
	pause->rx_pause = priv->rx_pause ? true : false;
	pause->tx_pause = priv->tx_pause ? true : false;
}

static int cpsw_set_pauseparam(struct net_device *ndev,
			       struct ethtool_pauseparam *pause)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	bool link;

	priv->rx_pause = pause->rx_pause ? true : false;
	priv->tx_pause = pause->tx_pause ? true : false;

	for_each_slave(priv, _cpsw_adjust_link, priv, &link);
	return 0;
}

static int cpsw_ethtool_op_begin(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	ret = pm_runtime_get_sync(cpsw->dev);
	if (ret < 0) {
		cpsw_err(priv, drv, "ethtool begin failed %d\n", ret);
		pm_runtime_put_noidle(cpsw->dev);
	}

	return ret;
}

static void cpsw_ethtool_op_complete(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ret = pm_runtime_put(priv->cpsw->dev);
	if (ret < 0)
		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
}

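/* ethtool -l/-L support: report the rx/tx channel limits and validate a
 * requested layout; devices with the single-irq quirk are limited to one
 * queue per direction.
 */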
static void cpsw_get_channels(struct net_device *ndev,
			      struct ethtool_channels *ch)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	ch->max_rx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_tx = cpsw->quirk_irq ? 1 : CPSW_MAX_QUEUES;
	ch->max_combined = 0;
	ch->max_other = 0;
	ch->other_count = 0;
	ch->rx_count = cpsw->rx_ch_num;
	ch->tx_count = cpsw->tx_ch_num;
	ch->combined_count = 0;
}

static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
				  struct ethtool_channels *ch)
{
	if (cpsw->quirk_irq) {
		dev_err(cpsw->dev, "Maximum one tx/rx queue is allowed");
		return -EOPNOTSUPP;
	}

	if (ch->combined_count)
		return -EINVAL;

	/* verify we have at least one channel in each direction */
	if (!ch->rx_count || !ch->tx_count)
		return -EINVAL;

	if (ch->rx_count > cpsw->data.channels ||
	    ch->tx_count > cpsw->data.channels)
		return -EINVAL;

	return 0;
}

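/* Grow or shrink one direction's CPDMA channel set until it matches ch_num:
 * new channels are created with cpdma_chan_create() (tx channels get
 * descending hardware priorities), surplus channels are destroyed.
 */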
static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
{
	struct cpsw_common *cpsw = priv->cpsw;
	void (*handler)(void *, int, int);
	struct netdev_queue *queue;
	struct cpsw_vector *vec;
	int ret, *ch, vch;

	if (rx) {
		ch = &cpsw->rx_ch_num;
		vec = cpsw->rxv;
		handler = cpsw_rx_handler;
	} else {
		ch = &cpsw->tx_ch_num;
		vec = cpsw->txv;
		handler = cpsw_tx_handler;
	}

	while (*ch < ch_num) {
		vch = rx ? *ch : 7 - *ch;
		vec[*ch].ch = cpdma_chan_create(cpsw->dma, vch, handler, rx);
		queue = netdev_get_tx_queue(priv->ndev, *ch);
		queue->tx_maxrate = 0;

		if (IS_ERR(vec[*ch].ch))
			return PTR_ERR(vec[*ch].ch);

		if (!vec[*ch].ch)
			return -EINVAL;

		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
		(*ch)++;
	}

	while (*ch > ch_num) {
		(*ch)--;

		ret = cpdma_chan_destroy(vec[*ch].ch);
		if (ret)
			return ret;

		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
			  (rx ? "rx" : "tx"));
	}

	return 0;
}

static int cpsw_update_channels(struct cpsw_priv *priv,
				struct ethtool_channels *ch)
{
	int ret;

	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
	if (ret)
		return ret;

	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
	if (ret)
		return ret;

	return 0;
}

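/* cpsw_suspend_data_pass()/cpsw_resume_data_pass() bracket reconfiguration
 * that must not race with traffic: interrupts and tx queues are stopped and
 * the CPDMA drained, then rx is refilled and everything restarted once the
 * new settings are in place.
 */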
static void cpsw_suspend_data_pass(struct net_device *ndev)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_slave *slave;
	int i;

	/* Disable NAPI scheduling */
	cpsw_intr_disable(cpsw);

	/* Stop all transmit queues for every network device.
	 * Disable re-using rx descriptors with dormant_on.
	 */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		netif_tx_stop_all_queues(slave->ndev);
		netif_dormant_on(slave->ndev);
	}

	/* Handle rest of tx packets and stop cpdma channels */
	cpdma_ctlr_stop(cpsw->dma);
}

static int cpsw_resume_data_pass(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	/* Allow rx packets handling */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_dormant_off(slave->ndev);

	/* After this receive is started */
	if (cpsw->usage_count) {
		ret = cpsw_fill_rx_channels(priv);
		if (ret)
			return ret;

		cpdma_ctlr_start(cpsw->dma);
		cpsw_intr_enable(cpsw);
	}

	/* Resume transmit for every affected interface */
	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
		if (slave->ndev && netif_running(slave->ndev))
			netif_tx_start_all_queues(slave->ndev);

	return 0;
}

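/* ethtool -L handler: pause the data path, resize the rx/tx channel sets,
 * propagate the new real queue counts to every slave netdev, rebalance the
 * channel budgets and resume traffic; the device is closed on failure.
 */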
static int cpsw_set_channels(struct net_device *ndev,
			     struct ethtool_channels *chs)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int i, ret;

	ret = cpsw_check_ch_settings(cpsw, chs);
	if (ret < 0)
		return ret;

	cpsw_suspend_data_pass(ndev);
	ret = cpsw_update_channels(priv, chs);
	if (ret)
		goto err;

	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
		if (!(slave->ndev && netif_running(slave->ndev)))
			continue;

		/* Inform stack about new count of queues */
		ret = netif_set_real_num_tx_queues(slave->ndev,
						   cpsw->tx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of tx queues\n");
			goto err;
		}

		ret = netif_set_real_num_rx_queues(slave->ndev,
						   cpsw->rx_ch_num);
		if (ret) {
			dev_err(priv->dev, "cannot set real number of rx queues\n");
			goto err;
		}
	}

	if (cpsw->usage_count)
		cpsw_split_res(ndev);

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;
err:
	dev_err(priv->dev, "cannot update channels number, closing device\n");
	dev_close(ndev);
	return ret;
}

static int cpsw_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_get_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

static int cpsw_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return phy_ethtool_set_eee(cpsw->slaves[slave_no].phy, edata);
	else
		return -EOPNOTSUPP;
}

static int cpsw_nway_reset(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);

	if (cpsw->slaves[slave_no].phy)
		return genphy_restart_aneg(cpsw->slaves[slave_no].phy);
	else
		return -EOPNOTSUPP;
}

static void cpsw_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ering)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;

	/* not supported */
	ering->tx_max_pending = 0;
	ering->tx_pending = cpdma_get_num_tx_descs(cpsw->dma);
	ering->rx_max_pending = descs_pool_size - CPSW_MAX_QUEUES;
	ering->rx_pending = cpdma_get_num_rx_descs(cpsw->dma);
}

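/* ethtool -G handler: only the rx descriptor count is adjustable; the data
 * path is paused, the descriptor pool re-split between channels and traffic
 * resumed (the device is closed if the resume fails).
 */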
static int cpsw_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ering)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ret;

	/* ignore ering->tx_pending - only rx_pending adjustment is supported */

	if (ering->rx_mini_pending || ering->rx_jumbo_pending ||
	    ering->rx_pending < CPSW_MAX_QUEUES ||
	    ering->rx_pending > (descs_pool_size - CPSW_MAX_QUEUES))
		return -EINVAL;

	if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
		return 0;

	cpsw_suspend_data_pass(ndev);

	cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);

	if (cpsw->usage_count)
		cpdma_chan_split_pool(cpsw->dma);

	ret = cpsw_resume_data_pass(ndev);
	if (!ret)
		return 0;

	dev_err(&ndev->dev, "cannot set ring params, closing device\n");
	dev_close(ndev);
	return ret;
}

static const struct ethtool_ops cpsw_ethtool_ops = {
	.get_drvinfo = cpsw_get_drvinfo,
	.get_msglevel = cpsw_get_msglevel,
	.set_msglevel = cpsw_set_msglevel,
	.get_link = ethtool_op_get_link,
	.get_ts_info = cpsw_get_ts_info,
	.get_coalesce = cpsw_get_coalesce,
	.set_coalesce = cpsw_set_coalesce,
	.get_sset_count = cpsw_get_sset_count,
	.get_strings = cpsw_get_strings,
	.get_ethtool_stats = cpsw_get_ethtool_stats,
	.get_pauseparam = cpsw_get_pauseparam,
	.set_pauseparam = cpsw_set_pauseparam,
	.get_wol = cpsw_get_wol,
	.set_wol = cpsw_set_wol,
	.get_regs_len = cpsw_get_regs_len,
	.get_regs = cpsw_get_regs,
	.begin = cpsw_ethtool_op_begin,
	.complete = cpsw_ethtool_op_complete,
	.get_channels = cpsw_get_channels,
	.set_channels = cpsw_set_channels,
	.get_link_ksettings = cpsw_get_link_ksettings,
	.set_link_ksettings = cpsw_set_link_ksettings,
	.get_eee = cpsw_get_eee,
	.set_eee = cpsw_set_eee,
	.nway_reset = cpsw_nway_reset,
	.get_ringparam = cpsw_get_ringparam,
	.set_ringparam = cpsw_set_ringparam,
};

static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
			    u32 slave_reg_ofs, u32 sliver_reg_ofs)
{
	void __iomem *regs = cpsw->regs;
	int slave_num = slave->slave_num;
	struct cpsw_slave_data *data = cpsw->data.slave_data + slave_num;

	slave->data = data;
	slave->regs = regs + slave_reg_ofs;
	slave->sliver = regs + sliver_reg_ofs;
	slave->port_vlan = data->dual_emac_res_vlan;
}

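/* Parse the cpsw device-tree node: switch-wide properties (slaves,
 * cpdma_channels, ale_entries, bd_ram_size, mac_control, dual_emac) plus the
 * per-slave sub-nodes with their PHY description (phy-handle, fixed-link or
 * legacy phy_id), phy-mode, MAC address and dual_emac reserved VLAN.
 */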
static int cpsw_probe_dt(struct cpsw_platform_data *data,
			 struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0, ret;
	u32 prop;

	if (!node)
		return -EINVAL;

	if (of_property_read_u32(node, "slaves", &prop)) {
		dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
		return -EINVAL;
	}
	data->slaves = prop;

	if (of_property_read_u32(node, "active_slave", &prop)) {
		dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
		return -EINVAL;
	}
	data->active_slave = prop;

	data->slave_data = devm_kcalloc(&pdev->dev,
					data->slaves,
					sizeof(struct cpsw_slave_data),
					GFP_KERNEL);
	if (!data->slave_data)
		return -ENOMEM;

	if (of_property_read_u32(node, "cpdma_channels", &prop)) {
		dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
		return -EINVAL;
	}
	data->channels = prop;

	if (of_property_read_u32(node, "ale_entries", &prop)) {
		dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
		return -EINVAL;
	}
	data->ale_entries = prop;

	if (of_property_read_u32(node, "bd_ram_size", &prop)) {
		dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
		return -EINVAL;
	}
	data->bd_ram_size = prop;

	if (of_property_read_u32(node, "mac_control", &prop)) {
		dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
		return -EINVAL;
	}
	data->mac_control = prop;

	if (of_property_read_bool(node, "dual_emac"))
		data->dual_emac = 1;

	/*
	 * Populate all the child nodes here...
	 */
	ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
	/* We do not want to force this, as in some cases may not have child */
	if (ret)
		dev_warn(&pdev->dev, "Doesn't have any child node\n");

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = data->slave_data + i;
		const void *mac_addr = NULL;
		int lenp;
		const __be32 *parp;

		/* This is no slave child node, continue */
		if (strcmp(slave_node->name, "slave"))
			continue;

		slave_data->ifphy = devm_of_phy_get(&pdev->dev, slave_node,
						    NULL);
		if (!IS_ENABLED(CONFIG_TI_CPSW_PHY_SEL) &&
		    IS_ERR(slave_data->ifphy)) {
			ret = PTR_ERR(slave_data->ifphy);
			dev_err(&pdev->dev,
				"%d: Error retrieving port phy: %d\n", i, ret);
			return ret;
		}

		slave_data->phy_node = of_parse_phandle(slave_node,
							"phy-handle", 0);
		parp = of_get_property(slave_node, "phy_id", &lenp);
		if (slave_data->phy_node) {
			dev_dbg(&pdev->dev,
				"slave[%d] using phy-handle=\"%pOF\"\n",
				i, slave_data->phy_node);
		} else if (of_phy_is_fixed_link(slave_node)) {
			/* In the case of a fixed PHY, the DT node associated
			 * to the PHY is the Ethernet MAC DT node.
			 */
			ret = of_phy_register_fixed_link(slave_node);
			if (ret) {
				if (ret != -EPROBE_DEFER)
					dev_err(&pdev->dev, "failed to register fixed-link phy: %d\n", ret);
				return ret;
			}
			slave_data->phy_node = of_node_get(slave_node);
		} else if (parp) {
			u32 phyid;
			struct device_node *mdio_node;
			struct platform_device *mdio;

			if (lenp != (sizeof(__be32) * 2)) {
				dev_err(&pdev->dev, "Invalid slave[%d] phy_id property\n", i);
				goto no_phy_slave;
			}
			mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
			phyid = be32_to_cpup(parp+1);
			mdio = of_find_device_by_node(mdio_node);
			of_node_put(mdio_node);
			if (!mdio) {
				dev_err(&pdev->dev, "Missing mdio platform device\n");
				return -EINVAL;
			}
			snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
				 PHY_ID_FMT, mdio->name, phyid);
			put_device(&mdio->dev);
		} else {
			dev_err(&pdev->dev,
				"No slave[%d] phy_id, phy-handle, or fixed-link property\n",
				i);
			goto no_phy_slave;
		}
		slave_data->phy_if = of_get_phy_mode(slave_node);
		if (slave_data->phy_if < 0) {
			dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
				i);
			return slave_data->phy_if;
		}

no_phy_slave:
		mac_addr = of_get_mac_address(slave_node);
		if (mac_addr) {
			memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
		} else {
			ret = ti_cm_get_macid(&pdev->dev, i,
					      slave_data->mac_addr);
			if (ret)
				return ret;
		}
		if (data->dual_emac) {
			if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
						 &prop)) {
				dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
				slave_data->dual_emac_res_vlan = i+1;
				dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
					slave_data->dual_emac_res_vlan, i);
			} else {
				slave_data->dual_emac_res_vlan = prop;
			}
		}

		i++;
		if (i == data->slaves)
			break;
	}

	return 0;
}

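/* Undo cpsw_probe_dt(): deregister fixed-link PHYs, drop the phy_node
 * references taken for each slave sub-node and depopulate the child
 * platform devices.
 */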
static void cpsw_remove_dt(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	struct cpsw_platform_data *data = &cpsw->data;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *slave_node;
	int i = 0;

	for_each_available_child_of_node(node, slave_node) {
		struct cpsw_slave_data *slave_data = &data->slave_data[i];

		if (strcmp(slave_node->name, "slave"))
			continue;

		if (of_phy_is_fixed_link(slave_node))
			of_phy_deregister_fixed_link(slave_node);

		of_node_put(slave_data->phy_node);

		i++;
		if (i == data->slaves)
			break;
	}

	of_platform_depopulate(&pdev->dev);
}

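/* In dual_emac mode the second port gets its own net_device: allocate it,
 * seed its MAC address from DT (or randomly), share the common cpsw state
 * and register it as slave 1.
 */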
static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_platform_data *data = &cpsw->data;
	struct net_device *ndev;
	struct cpsw_priv *priv_sl2;
	int ret = 0;

	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
	if (!ndev) {
		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
		return -ENOMEM;
	}

	priv_sl2 = netdev_priv(ndev);
	priv_sl2->cpsw = cpsw;
	priv_sl2->ndev = ndev;
	priv_sl2->dev = &ndev->dev;
	priv_sl2->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);

	if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
		memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
		       ETH_ALEN);
		dev_info(cpsw->dev, "cpsw: Detected MACID = %pM\n",
			 priv_sl2->mac_addr);
	} else {
		eth_random_addr(priv_sl2->mac_addr);
		dev_info(cpsw->dev, "cpsw: Random MACID = %pM\n",
			 priv_sl2->mac_addr);
	}
	memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);

	priv_sl2->emac_port = 1;
	cpsw->slaves[1].ndev = ndev;
	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;

	ndev->netdev_ops = &cpsw_netdev_ops;
	ndev->ethtool_ops = &cpsw_ethtool_ops;

	/* register the network device */
	SET_NETDEV_DEV(ndev, cpsw->dev);
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(cpsw->dev, "cpsw: error registering net device\n");
		free_netdev(ndev);
		ret = -ENODEV;
	}

	return ret;
}

static const struct of_device_id cpsw_of_mtable[] = {
	{ .compatible = "ti,cpsw"},
	{ .compatible = "ti,am335x-cpsw"},
	{ .compatible = "ti,am4372-cpsw"},
	{ .compatible = "ti,dra7-cpsw"},
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, cpsw_of_mtable);

static const struct soc_device_attribute cpsw_soc_devices[] = {
	{ .family = "AM33xx", .revision = "ES1.0"},
	{ /* sentinel */ }
};

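/* Platform probe: allocate the shared cpsw state and the first net_device,
 * parse the DT, map the register spaces according to the detected IP
 * version, create the CPDMA controller, default tx/rx channels, ALE and
 * CPTS, request the rx/tx interrupts and register the netdev(s).
 */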
Bill Pemberton663e12e2012-12-03 09:23:45 -05003341static int cpsw_probe(struct platform_device *pdev)
Mugunthan V Ndf828592012-03-18 20:17:54 +00003342{
Ivan Khoronzhukef4183a2016-08-10 02:22:35 +03003343 struct clk *clk;
Sebastian Siewiord1bd9ac2013-04-24 08:48:23 +00003344 struct cpsw_platform_data *data;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003345 struct net_device *ndev;
3346 struct cpsw_priv *priv;
3347 struct cpdma_params dma_params;
3348 struct cpsw_ale_params ale_params;
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303349 void __iomem *ss_regs;
Grygorii Strashko8a2c9a52016-12-06 18:00:41 -06003350 void __iomem *cpts_regs;
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303351 struct resource *res, *ss_res;
Mugunthan V N1d147cc2015-09-07 15:16:44 +05303352 struct gpio_descs *mode;
Richard Cochran549985e2012-11-14 09:07:56 +00003353 u32 slave_offset, sliver_offset, slave_size;
Ivan Khoronzhuk9611d6d2018-05-17 01:21:45 +03003354 const struct soc_device_attribute *soc;
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03003355 struct cpsw_common *cpsw;
Ivan Khoronzhuk79b33252018-07-24 00:26:29 +03003356 int ret = 0, i, ch;
Felipe Balbi5087b912015-01-16 10:11:11 -06003357 int irq;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003358
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03003359 cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
Johan Hovold3420ea82016-11-17 17:40:03 +01003360 if (!cpsw)
3361 return -ENOMEM;
3362
Ivan Khoronzhuk56e31bd2016-08-10 02:22:38 +03003363 cpsw->dev = &pdev->dev;
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03003364
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03003365 ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003366 if (!ndev) {
George Cherian88c99ff2014-05-12 10:21:19 +05303367 dev_err(&pdev->dev, "error allocating net_device\n");
Mugunthan V Ndf828592012-03-18 20:17:54 +00003368 return -ENOMEM;
3369 }
3370
3371 platform_set_drvdata(pdev, ndev);
3372 priv = netdev_priv(ndev);
Ivan Khoronzhuk649a1682016-08-10 02:22:37 +03003373 priv->cpsw = cpsw;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003374 priv->ndev = ndev;
3375 priv->dev = &ndev->dev;
3376 priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03003377 cpsw->rx_packet_max = max(rx_packet_max, 128);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003378
Mugunthan V N1d147cc2015-09-07 15:16:44 +05303379 mode = devm_gpiod_get_array_optional(&pdev->dev, "mode", GPIOD_OUT_LOW);
3380 if (IS_ERR(mode)) {
3381 ret = PTR_ERR(mode);
3382 dev_err(&pdev->dev, "gpio request failed, ret %d\n", ret);
3383 goto clean_ndev_ret;
3384 }
3385
Vaibhav Hiremath1fb19aa2012-11-14 09:07:55 +00003386 /*
3387 * This may be required here for child devices.
3388 */
3389 pm_runtime_enable(&pdev->dev);
3390
Mugunthan V N739683b2013-06-06 23:45:14 +05303391 /* Select default pin state */
3392 pinctrl_pm_select_default_state(&pdev->dev);
3393
Johan Hovolda4e32b02016-11-17 17:40:00 +01003394 /* Need to enable clocks with runtime PM api to access module
3395 * registers
3396 */
3397 ret = pm_runtime_get_sync(&pdev->dev);
3398 if (ret < 0) {
3399 pm_runtime_put_noidle(&pdev->dev);
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303400 goto clean_runtime_disable_ret;
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003401 }
Johan Hovolda4e32b02016-11-17 17:40:00 +01003402
Johan Hovold23a09872016-11-17 17:40:04 +01003403 ret = cpsw_probe_dt(&cpsw->data, pdev);
3404 if (ret)
Johan Hovolda4e32b02016-11-17 17:40:00 +01003405 goto clean_dt_ret;
Johan Hovold23a09872016-11-17 17:40:04 +01003406
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003407 data = &cpsw->data;
Ivan Khoronzhuke05107e2016-08-22 21:18:26 +03003408 cpsw->rx_ch_num = 1;
3409 cpsw->tx_ch_num = 1;
Mugunthan V N2eb32b02012-07-30 10:17:14 +00003410
Mugunthan V Ndf828592012-03-18 20:17:54 +00003411 if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
3412 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
George Cherian88c99ff2014-05-12 10:21:19 +05303413 dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003414 } else {
Joe Perches7efd26d2012-07-12 19:33:06 +00003415 eth_random_addr(priv->mac_addr);
George Cherian88c99ff2014-05-12 10:21:19 +05303416 dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003417 }
3418
3419 memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
3420
Kees Cooka86854d2018-06-12 14:07:58 -07003421 cpsw->slaves = devm_kcalloc(&pdev->dev,
3422 data->slaves, sizeof(struct cpsw_slave),
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303423 GFP_KERNEL);
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003424 if (!cpsw->slaves) {
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303425 ret = -ENOMEM;
Johan Hovolda4e32b02016-11-17 17:40:00 +01003426 goto clean_dt_ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003427 }
3428 for (i = 0; i < data->slaves; i++)
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003429 cpsw->slaves[i].slave_num = i;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003430
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003431 cpsw->slaves[0].ndev = ndev;
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003432 priv->emac_port = 0;
3433
Ivan Khoronzhukef4183a2016-08-10 02:22:35 +03003434 clk = devm_clk_get(&pdev->dev, "fck");
3435 if (IS_ERR(clk)) {
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303436 dev_err(priv->dev, "fck is not found\n");
Mugunthan V Nf150bd72012-07-17 08:09:50 +00003437 ret = -ENODEV;
Johan Hovolda4e32b02016-11-17 17:40:00 +01003438 goto clean_dt_ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003439 }
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03003440 cpsw->bus_freq_mhz = clk_get_rate(clk) / 1000000;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003441
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303442 ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3443 ss_regs = devm_ioremap_resource(&pdev->dev, ss_res);
3444 if (IS_ERR(ss_regs)) {
3445 ret = PTR_ERR(ss_regs);
Johan Hovolda4e32b02016-11-17 17:40:00 +01003446 goto clean_dt_ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003447 }
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003448 cpsw->regs = ss_regs;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003449
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03003450 cpsw->version = readl(&cpsw->regs->id_ver);
Mugunthan V Nf280e892013-12-11 22:09:05 -06003451
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303452 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003453 cpsw->wr_regs = devm_ioremap_resource(&pdev->dev, res);
3454 if (IS_ERR(cpsw->wr_regs)) {
3455 ret = PTR_ERR(cpsw->wr_regs);
Johan Hovolda4e32b02016-11-17 17:40:00 +01003456 goto clean_dt_ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003457 }
Mugunthan V Ndf828592012-03-18 20:17:54 +00003458
3459 memset(&dma_params, 0, sizeof(dma_params));
Richard Cochran549985e2012-11-14 09:07:56 +00003460 memset(&ale_params, 0, sizeof(ale_params));
3461
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03003462 switch (cpsw->version) {
Richard Cochran549985e2012-11-14 09:07:56 +00003463 case CPSW_VERSION_1:
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003464 cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
Grygorii Strashko8a2c9a52016-12-06 18:00:41 -06003465 cpts_regs = ss_regs + CPSW1_CPTS_OFFSET;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003466 cpsw->hw_stats = ss_regs + CPSW1_HW_STATS;
Richard Cochran549985e2012-11-14 09:07:56 +00003467 dma_params.dmaregs = ss_regs + CPSW1_CPDMA_OFFSET;
3468 dma_params.txhdp = ss_regs + CPSW1_STATERAM_OFFSET;
3469 ale_params.ale_regs = ss_regs + CPSW1_ALE_OFFSET;
3470 slave_offset = CPSW1_SLAVE_OFFSET;
3471 slave_size = CPSW1_SLAVE_SIZE;
3472 sliver_offset = CPSW1_SLIVER_OFFSET;
3473 dma_params.desc_mem_phys = 0;
3474 break;
3475 case CPSW_VERSION_2:
Mugunthan V Nc193f362013-08-05 17:30:05 +05303476 case CPSW_VERSION_3:
Mugunthan V N926489b2013-08-12 17:11:15 +05303477 case CPSW_VERSION_4:
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003478 cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
Grygorii Strashko8a2c9a52016-12-06 18:00:41 -06003479 cpts_regs = ss_regs + CPSW2_CPTS_OFFSET;
Ivan Khoronzhuk5d8d0d42016-08-10 02:22:39 +03003480 cpsw->hw_stats = ss_regs + CPSW2_HW_STATS;
Richard Cochran549985e2012-11-14 09:07:56 +00003481 dma_params.dmaregs = ss_regs + CPSW2_CPDMA_OFFSET;
3482 dma_params.txhdp = ss_regs + CPSW2_STATERAM_OFFSET;
3483 ale_params.ale_regs = ss_regs + CPSW2_ALE_OFFSET;
3484 slave_offset = CPSW2_SLAVE_OFFSET;
3485 slave_size = CPSW2_SLAVE_SIZE;
3486 sliver_offset = CPSW2_SLIVER_OFFSET;
3487 dma_params.desc_mem_phys =
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303488 (u32 __force) ss_res->start + CPSW2_BD_OFFSET;
Richard Cochran549985e2012-11-14 09:07:56 +00003489 break;
3490 default:
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03003491 dev_err(priv->dev, "unknown version 0x%08x\n", cpsw->version);
Richard Cochran549985e2012-11-14 09:07:56 +00003492 ret = -ENODEV;
Johan Hovolda4e32b02016-11-17 17:40:00 +01003493 goto clean_dt_ret;
Richard Cochran549985e2012-11-14 09:07:56 +00003494 }
Ivan Khoronzhuk606f3992016-08-10 02:22:42 +03003495 for (i = 0; i < cpsw->data.slaves; i++) {
3496 struct cpsw_slave *slave = &cpsw->slaves[i];
3497
3498 cpsw_slave_init(slave, cpsw, slave_offset, sliver_offset);
Richard Cochran549985e2012-11-14 09:07:56 +00003499 slave_offset += slave_size;
3500 sliver_offset += SLIVER_SIZE;
3501 }
3502
Mugunthan V Ndf828592012-03-18 20:17:54 +00003503 dma_params.dev = &pdev->dev;
Richard Cochran549985e2012-11-14 09:07:56 +00003504 dma_params.rxthresh = dma_params.dmaregs + CPDMA_RXTHRESH;
3505 dma_params.rxfree = dma_params.dmaregs + CPDMA_RXFREE;
3506 dma_params.rxhdp = dma_params.txhdp + CPDMA_RXHDP;
3507 dma_params.txcp = dma_params.txhdp + CPDMA_TXCP;
3508 dma_params.rxcp = dma_params.txhdp + CPDMA_RXCP;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003509
3510 dma_params.num_chan = data->channels;
3511 dma_params.has_soft_reset = true;
3512 dma_params.min_packet_size = CPSW_MIN_PACKET_SIZE;
3513 dma_params.desc_mem_size = data->bd_ram_size;
3514 dma_params.desc_align = 16;
3515 dma_params.has_ext_regs = true;
Richard Cochran549985e2012-11-14 09:07:56 +00003516 dma_params.desc_hw_addr = dma_params.desc_mem_phys;
Ivan Khoronzhuk83fcad02016-11-29 17:00:49 +02003517 dma_params.bus_freq_mhz = cpsw->bus_freq_mhz;
Grygorii Strashko90225bf2017-01-06 14:07:33 -06003518 dma_params.descs_pool_size = descs_pool_size;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003519
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03003520 cpsw->dma = cpdma_ctlr_create(&dma_params);
3521 if (!cpsw->dma) {
Mugunthan V Ndf828592012-03-18 20:17:54 +00003522 dev_err(priv->dev, "error initializing dma\n");
3523 ret = -ENOMEM;
Johan Hovolda4e32b02016-11-17 17:40:00 +01003524 goto clean_dt_ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003525 }
3526
Ivan Khoronzhuk9611d6d2018-05-17 01:21:45 +03003527 soc = soc_device_match(cpsw_soc_devices);
3528 if (soc)
3529 cpsw->quirk_irq = 1;
3530
Ivan Khoronzhuk79b33252018-07-24 00:26:29 +03003531 ch = cpsw->quirk_irq ? 0 : 7;
3532 cpsw->txv[0].ch = cpdma_chan_create(cpsw->dma, ch, cpsw_tx_handler, 0);
Ivan Khoronzhuk8a83c5d2017-12-12 23:06:35 +02003533 if (IS_ERR(cpsw->txv[0].ch)) {
3534 dev_err(priv->dev, "error initializing tx dma channel\n");
3535 ret = PTR_ERR(cpsw->txv[0].ch);
3536 goto clean_dma_ret;
3537 }
3538
Ivan Khoronzhuk8feb0a12016-11-29 17:00:51 +02003539 cpsw->rxv[0].ch = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
Ivan Khoronzhuk8a83c5d2017-12-12 23:06:35 +02003540 if (IS_ERR(cpsw->rxv[0].ch)) {
3541 dev_err(priv->dev, "error initializing rx dma channel\n");
3542 ret = PTR_ERR(cpsw->rxv[0].ch);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003543 goto clean_dma_ret;
3544 }
3545
Ivan Khoronzhuk9fe9aa02017-02-15 19:45:02 +02003546 ale_params.dev = &pdev->dev;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003547 ale_params.ale_ageout = ale_ageout;
3548 ale_params.ale_entries = data->ale_entries;
Grygorii Strashkoc6395f12017-11-30 18:21:14 -06003549 ale_params.ale_ports = CPSW_ALE_PORTS_NUM;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003550
Ivan Khoronzhuk2a05a622016-08-10 02:22:44 +03003551 cpsw->ale = cpsw_ale_create(&ale_params);
3552 if (!cpsw->ale) {
Mugunthan V Ndf828592012-03-18 20:17:54 +00003553 dev_err(priv->dev, "error initializing ale engine\n");
3554 ret = -ENODEV;
3555 goto clean_dma_ret;
3556 }
3557
Grygorii Strashko4a88fb92016-12-06 18:00:42 -06003558 cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpsw->dev->of_node);
Grygorii Strashko8a2c9a52016-12-06 18:00:41 -06003559 if (IS_ERR(cpsw->cpts)) {
3560 ret = PTR_ERR(cpsw->cpts);
Grygorii Strashko1971ab52017-11-30 18:21:19 -06003561 goto clean_dma_ret;
Grygorii Strashko8a2c9a52016-12-06 18:00:41 -06003562 }
3563
Felipe Balbic03abd82015-01-16 10:11:12 -06003564 ndev->irq = platform_get_irq(pdev, 1);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003565 if (ndev->irq < 0) {
3566 dev_err(priv->dev, "error getting irq resource\n");
Julia Lawallc1e33342015-12-26 20:12:13 +01003567 ret = ndev->irq;
Grygorii Strashko1971ab52017-11-30 18:21:19 -06003568 goto clean_dma_ret;
Mugunthan V Ndf828592012-03-18 20:17:54 +00003569 }
3570
Grygorii Strashkoa3a41d22018-03-15 15:15:50 -05003571 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_RX;
Keerthy070f9c62017-07-20 16:59:52 +05303572
3573 ndev->netdev_ops = &cpsw_netdev_ops;
3574 ndev->ethtool_ops = &cpsw_ethtool_ops;
Ivan Khoronzhuk9611d6d2018-05-17 01:21:45 +03003575 netif_napi_add(ndev, &cpsw->napi_rx,
3576 cpsw->quirk_irq ? cpsw_rx_poll : cpsw_rx_mq_poll,
3577 CPSW_POLL_WEIGHT);
3578 netif_tx_napi_add(ndev, &cpsw->napi_tx,
3579 cpsw->quirk_irq ? cpsw_tx_poll : cpsw_tx_mq_poll,
3580 CPSW_POLL_WEIGHT);
Keerthy070f9c62017-07-20 16:59:52 +05303581 cpsw_split_res(ndev);
3582
3583 /* register the network device */
3584 SET_NETDEV_DEV(ndev, &pdev->dev);
3585 ret = register_netdev(ndev);
3586 if (ret) {
3587 dev_err(priv->dev, "error registering net device\n");
3588 ret = -ENODEV;
Grygorii Strashko1971ab52017-11-30 18:21:19 -06003589 goto clean_dma_ret;
Keerthy070f9c62017-07-20 16:59:52 +05303590 }
3591
3592 if (cpsw->data.dual_emac) {
3593 ret = cpsw_probe_dual_emac(priv);
3594 if (ret) {
3595 cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
3596 goto clean_unregister_netdev_ret;
3597 }
3598 }
3599
Felipe Balbic03abd82015-01-16 10:11:12 -06003600 /* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
3601 * MISC IRQs which are always kept disabled with this driver so
3602 * we will not request them.
3603 *
3604 * If anyone wants to implement support for those, make sure to
3605 * first request and append them to irqs_table array.
3606 */
Daniel Mackc2b32e52014-09-04 09:00:23 +02003607
Felipe Balbic03abd82015-01-16 10:11:12 -06003608 /* RX IRQ */
Felipe Balbi5087b912015-01-16 10:11:11 -06003609 irq = platform_get_irq(pdev, 1);
Julia Lawallc1e33342015-12-26 20:12:13 +01003610 if (irq < 0) {
3611 ret = irq;
Grygorii Strashko1971ab52017-11-30 18:21:19 -06003612 goto clean_dma_ret;
Julia Lawallc1e33342015-12-26 20:12:13 +01003613 }
Felipe Balbi5087b912015-01-16 10:11:11 -06003614
Ivan Khoronzhuke38b5a32016-08-10 02:22:41 +03003615 cpsw->irqs_table[0] = irq;
Felipe Balbic03abd82015-01-16 10:11:12 -06003616 ret = devm_request_irq(&pdev->dev, irq, cpsw_rx_interrupt,
Ivan Khoronzhukdbc4ec52016-08-10 02:22:43 +03003617 0, dev_name(&pdev->dev), cpsw);
Felipe Balbi5087b912015-01-16 10:11:11 -06003618 if (ret < 0) {
3619 dev_err(priv->dev, "error attaching irq (%d)\n", ret);
Grygorii Strashko1971ab52017-11-30 18:21:19 -06003620 goto clean_dma_ret;
Felipe Balbi5087b912015-01-16 10:11:11 -06003621 }
3622
Felipe Balbic03abd82015-01-16 10:11:12 -06003623 /* TX IRQ */
Felipe Balbi5087b912015-01-16 10:11:11 -06003624 irq = platform_get_irq(pdev, 2);
Julia Lawallc1e33342015-12-26 20:12:13 +01003625 if (irq < 0) {
3626 ret = irq;
Grygorii Strashko1971ab52017-11-30 18:21:19 -06003627 goto clean_dma_ret;
Julia Lawallc1e33342015-12-26 20:12:13 +01003628 }
Felipe Balbi5087b912015-01-16 10:11:11 -06003629
Ivan Khoronzhuke38b5a32016-08-10 02:22:41 +03003630 cpsw->irqs_table[1] = irq;
Felipe Balbic03abd82015-01-16 10:11:12 -06003631 ret = devm_request_irq(&pdev->dev, irq, cpsw_tx_interrupt,
Ivan Khoronzhukdbc4ec52016-08-10 02:22:43 +03003632 0, dev_name(&pdev->dev), cpsw);
Felipe Balbi5087b912015-01-16 10:11:11 -06003633 if (ret < 0) {
3634 dev_err(priv->dev, "error attaching irq (%d)\n", ret);
Grygorii Strashko1971ab52017-11-30 18:21:19 -06003635 goto clean_dma_ret;
Felipe Balbi5087b912015-01-16 10:11:11 -06003636 }
Daniel Mackc2b32e52014-09-04 09:00:23 +02003637
Grygorii Strashko90225bf2017-01-06 14:07:33 -06003638 cpsw_notice(priv, probe,
3639 "initialized device (regs %pa, irq %d, pool size %d)\n",
3640 &ss_res->start, ndev->irq, dma_params.descs_pool_size);
Mugunthan V Nd9ba8f92013-02-11 09:52:20 +00003641
Johan Hovoldc46ab7e2016-11-17 17:39:58 +01003642 pm_runtime_put(&pdev->dev);
3643
Mugunthan V Ndf828592012-03-18 20:17:54 +00003644 return 0;
3645
Johan Hovolda7fe9d42016-11-17 17:40:02 +01003646clean_unregister_netdev_ret:
3647 unregister_netdev(ndev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003648clean_dma_ret:
Ivan Khoronzhuk2c836bd2016-08-10 02:22:40 +03003649 cpdma_ctlr_destroy(cpsw->dma);
Johan Hovolda4e32b02016-11-17 17:40:00 +01003650clean_dt_ret:
3651 cpsw_remove_dt(pdev);
Johan Hovoldc46ab7e2016-11-17 17:39:58 +01003652 pm_runtime_put_sync(&pdev->dev);
Daniel Mackaa1a15e2013-09-21 00:50:38 +05303653clean_runtime_disable_ret:
Mugunthan V Nf150bd72012-07-17 08:09:50 +00003654 pm_runtime_disable(&pdev->dev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003655clean_ndev_ret:
Sebastian Siewiord1bd9ac2013-04-24 08:48:23 +00003656 free_netdev(priv->ndev);
Mugunthan V Ndf828592012-03-18 20:17:54 +00003657 return ret;
3658}
3659
static int cpsw_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
	int ret;

	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(&pdev->dev);
		return ret;
	}

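	/* Unregister the network devices before releasing the CPTS, CPDMA
	 * and DT resources they were using.
	 */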
	if (cpsw->data.dual_emac)
		unregister_netdev(cpsw->slaves[1].ndev);
	unregister_netdev(ndev);

	cpts_release(cpsw->cpts);
	cpdma_ctlr_destroy(cpsw->dma);
	cpsw_remove_dt(pdev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	if (cpsw->data.dual_emac)
		free_netdev(cpsw->slaves[1].ndev);
	free_netdev(ndev);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
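/*
 * System sleep support: close every interface that was running before
 * suspend and reopen it on resume, switching the pin controller between
 * its sleep and default states around the transition.
 */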
static int cpsw_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_stop(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_stop(ndev);
	}

	/* Select sleep pin state */
	pinctrl_pm_select_sleep_state(dev);

	return 0;
}

static int cpsw_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);

	/* Select default pin state */
	pinctrl_pm_select_default_state(dev);

	/* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */
	rtnl_lock();
	if (cpsw->data.dual_emac) {
		int i;

		for (i = 0; i < cpsw->data.slaves; i++) {
			if (netif_running(cpsw->slaves[i].ndev))
				cpsw_ndo_open(cpsw->slaves[i].ndev);
		}
	} else {
		if (netif_running(ndev))
			cpsw_ndo_open(ndev);
	}
	rtnl_unlock();

	return 0;
}
#endif

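/*
 * SIMPLE_DEV_PM_OPS() binds the handlers above as system sleep
 * (suspend/resume) callbacks; with CONFIG_PM_SLEEP disabled it leaves
 * the dev_pm_ops empty, matching the #ifdef guard above.
 */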
static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume);

static struct platform_driver cpsw_driver = {
	.driver = {
		.name	 = "cpsw",
		.pm	 = &cpsw_pm_ops,
		.of_match_table = cpsw_of_mtable,
	},
	.probe = cpsw_probe,
	.remove = cpsw_remove,
};

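/*
 * module_platform_driver() generates the module init/exit boilerplate
 * that registers and unregisters cpsw_driver, so no explicit init or
 * exit routines are needed.
 */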
module_platform_driver(cpsw_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
MODULE_DESCRIPTION("TI CPSW Ethernet driver");