/* SuperH Ethernet device driver
 *
 * Copyright (C) 2006-2012 Nobuhiro Iwamatsu
 * Copyright (C) 2008-2014 Renesas Solutions Corp.
 * Copyright (C) 2013-2014 Cogent Embedded, Inc.
 * Copyright (C) 2014 Codethink Limited
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/clk.h>
#include <linux/sh_eth.h>
#include <linux/of_mdio.h>

#include "sh_eth.h"

#define SH_ETH_DEF_MSG_ENABLE \
		(NETIF_MSG_LINK | \
		NETIF_MSG_TIMER | \
		NETIF_MSG_RX_ERR | \
		NETIF_MSG_TX_ERR)

static const u16 sh_eth_offset_gigabit[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR]	= 0x0000,
	[EDMR]	= 0x0400,
	[EDTRR]	= 0x0408,
	[EDRRR]	= 0x0410,
	[EESR]	= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]	= 0x0010,
	[TDFAR]	= 0x0014,
	[TDFXR]	= 0x0018,
	[TDFFR]	= 0x001c,
	[RDLAR]	= 0x0030,
	[RDFAR]	= 0x0034,
	[RDFXR]	= 0x0038,
	[RDFFR]	= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]	= 0x0440,
	[TFTR]	= 0x0448,
	[FDR]	= 0x0450,
	[RMCR]	= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]	= 0x0468,
	[CSMR]	= 0x04E4,

	[ECMR]	= 0x0500,
	[ECSR]	= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]	= 0x0520,
	[PSR]	= 0x0528,
	[PIPR]	= 0x052c,
	[RFLR]	= 0x0508,
	[APR]	= 0x0554,
	[MPR]	= 0x0558,
	[PFTCR]	= 0x055c,
	[PFRCR]	= 0x0560,
	[TPAUSER]	= 0x0564,
	[GECMR]	= 0x05b0,
	[BCULR]	= 0x05b4,
	[MAHR]	= 0x05c0,
	[MALR]	= 0x05c8,
	[TROCR]	= 0x0700,
	[CDCR]	= 0x0708,
	[LCCR]	= 0x0710,
	[CEFCR]	= 0x0740,
	[FRECR]	= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]	= 0x0760,
	[CERCR]	= 0x0768,
	[CEECR]	= 0x0770,
	[MAFCR]	= 0x0778,
	[RMII_MII]	= 0x0790,

	[ARSTR]	= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAG0]	= 0x0040,
	[TSU_QTAG1]	= 0x0044,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_VTAG0]	= 0x0058,
	[TSU_VTAG1]	= 0x005c,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a0,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,
};

static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDSR]	= 0x0000,
	[EDMR]	= 0x0400,
	[EDTRR]	= 0x0408,
	[EDRRR]	= 0x0410,
	[EESR]	= 0x0428,
	[EESIPR]	= 0x0430,
	[TDLAR]	= 0x0010,
	[TDFAR]	= 0x0014,
	[TDFXR]	= 0x0018,
	[TDFFR]	= 0x001c,
	[RDLAR]	= 0x0030,
	[RDFAR]	= 0x0034,
	[RDFXR]	= 0x0038,
	[RDFFR]	= 0x003c,
	[TRSCER]	= 0x0438,
	[RMFCR]	= 0x0440,
	[TFTR]	= 0x0448,
	[FDR]	= 0x0450,
	[RMCR]	= 0x0458,
	[RPADIR]	= 0x0460,
	[FCFTR]	= 0x0468,
	[CSMR]	= 0x04E4,

	[ECMR]	= 0x0500,
	[RFLR]	= 0x0508,
	[ECSR]	= 0x0510,
	[ECSIPR]	= 0x0518,
	[PIR]	= 0x0520,
	[APR]	= 0x0554,
	[MPR]	= 0x0558,
	[PFTCR]	= 0x055c,
	[PFRCR]	= 0x0560,
	[TPAUSER]	= 0x0564,
	[MAHR]	= 0x05c0,
	[MALR]	= 0x05c8,
	[CEFCR]	= 0x0740,
	[FRECR]	= 0x0748,
	[TSFRCR]	= 0x0750,
	[TLFRCR]	= 0x0758,
	[RFCR]	= 0x0760,
	[MAFCR]	= 0x0778,

	[ARSTR]	= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_VTAG0]	= 0x0058,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRH31]	= 0x01f8,
	[TSU_ADRL31]	= 0x01fc,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008C,
};

static const u16 sh_eth_offset_fast_rcar[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]	= 0x0300,
	[RFLR]	= 0x0308,
	[ECSR]	= 0x0310,
	[ECSIPR]	= 0x0318,
	[PIR]	= 0x0320,
	[PSR]	= 0x0328,
	[RDMLR]	= 0x0340,
	[IPGR]	= 0x0350,
	[APR]	= 0x0354,
	[MPR]	= 0x0358,
	[RFCF]	= 0x0360,
	[TPAUSER]	= 0x0364,
	[TPAUSECR]	= 0x0368,
	[MAHR]	= 0x03c0,
	[MALR]	= 0x03c8,
	[TROCR]	= 0x03d0,
	[CDCR]	= 0x03d4,
	[LCCR]	= 0x03d8,
	[CNDCR]	= 0x03dc,
	[CEFCR]	= 0x03e4,
	[FRECR]	= 0x03e8,
	[TSFRCR]	= 0x03ec,
	[TLFRCR]	= 0x03f0,
	[RFCR]	= 0x03f4,
	[MAFCR]	= 0x03f8,

	[EDMR]	= 0x0200,
	[EDTRR]	= 0x0208,
	[EDRRR]	= 0x0210,
	[TDLAR]	= 0x0218,
	[RDLAR]	= 0x0220,
	[EESR]	= 0x0228,
	[EESIPR]	= 0x0230,
	[TRSCER]	= 0x0238,
	[RMFCR]	= 0x0240,
	[TFTR]	= 0x0248,
	[FDR]	= 0x0250,
	[RMCR]	= 0x0258,
	[TFUCR]	= 0x0264,
	[RFOCR]	= 0x0268,
	[RMIIMODE]	= 0x026c,
	[FCFTR]	= 0x0270,
	[TRIMD]	= 0x027c,
};

static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
	[ECMR]	= 0x0100,
	[RFLR]	= 0x0108,
	[ECSR]	= 0x0110,
	[ECSIPR]	= 0x0118,
	[PIR]	= 0x0120,
	[PSR]	= 0x0128,
	[RDMLR]	= 0x0140,
	[IPGR]	= 0x0150,
	[APR]	= 0x0154,
	[MPR]	= 0x0158,
	[TPAUSER]	= 0x0164,
	[RFCF]	= 0x0160,
	[TPAUSECR]	= 0x0168,
	[BCFRR]	= 0x016c,
	[MAHR]	= 0x01c0,
	[MALR]	= 0x01c8,
	[TROCR]	= 0x01d0,
	[CDCR]	= 0x01d4,
	[LCCR]	= 0x01d8,
	[CNDCR]	= 0x01dc,
	[CEFCR]	= 0x01e4,
	[FRECR]	= 0x01e8,
	[TSFRCR]	= 0x01ec,
	[TLFRCR]	= 0x01f0,
	[RFCR]	= 0x01f4,
	[MAFCR]	= 0x01f8,
	[RTRATE]	= 0x01fc,

	[EDMR]	= 0x0000,
	[EDTRR]	= 0x0008,
	[EDRRR]	= 0x0010,
	[TDLAR]	= 0x0018,
	[RDLAR]	= 0x0020,
	[EESR]	= 0x0028,
	[EESIPR]	= 0x0030,
	[TRSCER]	= 0x0038,
	[RMFCR]	= 0x0040,
	[TFTR]	= 0x0048,
	[FDR]	= 0x0050,
	[RMCR]	= 0x0058,
	[TFUCR]	= 0x0064,
	[RFOCR]	= 0x0068,
	[FCFTR]	= 0x0070,
	[RPADIR]	= 0x0078,
	[TRIMD]	= 0x007c,
	[RBWAR]	= 0x00c8,
	[RDFAR]	= 0x00cc,
	[TBRAR]	= 0x00d4,
	[TDFAR]	= 0x00d8,
};

static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
	[EDMR]	= 0x0000,
	[EDTRR]	= 0x0004,
	[EDRRR]	= 0x0008,
	[TDLAR]	= 0x000c,
	[RDLAR]	= 0x0010,
	[EESR]	= 0x0014,
	[EESIPR]	= 0x0018,
	[TRSCER]	= 0x001c,
	[RMFCR]	= 0x0020,
	[TFTR]	= 0x0024,
	[FDR]	= 0x0028,
	[RMCR]	= 0x002c,
	[EDOCR]	= 0x0030,
	[FCFTR]	= 0x0034,
	[RPADIR]	= 0x0038,
	[TRIMD]	= 0x003c,
	[RBWAR]	= 0x0040,
	[RDFAR]	= 0x0044,
	[TBRAR]	= 0x004c,
	[TDFAR]	= 0x0050,

	[ECMR]	= 0x0160,
	[ECSR]	= 0x0164,
	[ECSIPR]	= 0x0168,
	[PIR]	= 0x016c,
	[MAHR]	= 0x0170,
	[MALR]	= 0x0174,
	[RFLR]	= 0x0178,
	[PSR]	= 0x017c,
	[TROCR]	= 0x0180,
	[CDCR]	= 0x0184,
	[LCCR]	= 0x0188,
	[CNDCR]	= 0x018c,
	[CEFCR]	= 0x0194,
	[FRECR]	= 0x0198,
	[TSFRCR]	= 0x019c,
	[TLFRCR]	= 0x01a0,
	[RFCR]	= 0x01a4,
	[MAFCR]	= 0x01a8,
	[IPGR]	= 0x01b4,
	[APR]	= 0x01b8,
	[MPR]	= 0x01bc,
	[TPAUSER]	= 0x01c4,
	[BCFR]	= 0x01cc,

	[ARSTR]	= 0x0000,
	[TSU_CTRST]	= 0x0004,
	[TSU_FWEN0]	= 0x0010,
	[TSU_FWEN1]	= 0x0014,
	[TSU_FCM]	= 0x0018,
	[TSU_BSYSL0]	= 0x0020,
	[TSU_BSYSL1]	= 0x0024,
	[TSU_PRISL0]	= 0x0028,
	[TSU_PRISL1]	= 0x002c,
	[TSU_FWSL0]	= 0x0030,
	[TSU_FWSL1]	= 0x0034,
	[TSU_FWSLC]	= 0x0038,
	[TSU_QTAGM0]	= 0x0040,
	[TSU_QTAGM1]	= 0x0044,
	[TSU_ADQT0]	= 0x0048,
	[TSU_ADQT1]	= 0x004c,
	[TSU_FWSR]	= 0x0050,
	[TSU_FWINMK]	= 0x0054,
	[TSU_ADSBSY]	= 0x0060,
	[TSU_TEN]	= 0x0064,
	[TSU_POST1]	= 0x0070,
	[TSU_POST2]	= 0x0074,
	[TSU_POST3]	= 0x0078,
	[TSU_POST4]	= 0x007c,

	[TXNLCR0]	= 0x0080,
	[TXALCR0]	= 0x0084,
	[RXNLCR0]	= 0x0088,
	[RXALCR0]	= 0x008c,
	[FWNLCR0]	= 0x0090,
	[FWALCR0]	= 0x0094,
	[TXNLCR1]	= 0x00a0,
	[TXALCR1]	= 0x00a0,
	[RXNLCR1]	= 0x00a8,
	[RXALCR1]	= 0x00ac,
	[FWNLCR1]	= 0x00b0,
	[FWALCR1]	= 0x00b4,

	[TSU_ADRH0]	= 0x0100,
	[TSU_ADRL0]	= 0x0104,
	[TSU_ADRL31]	= 0x01fc,
};

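/* Illustration (added in editing, not part of the original file): each table
 * above is indexed by the register enum from sh_eth.h, so a single accessor
 * works for every SoC variant.  The read side amounts to roughly:
 *
 *	static inline u32 sh_eth_read(struct net_device *ndev, int reg)
 *	{
 *		struct sh_eth_private *mdp = netdev_priv(ndev);
 *
 *		return ioread32(mdp->addr + mdp->reg_offset[reg]);
 *	}
 *
 * e.g. sh_eth_read(ndev, EDMR) touches offset 0x0400 on GEther parts but
 * offset 0x0000 on SH3/SH2 fast-Ethernet parts.
 */
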
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_gigabit;
}

static bool sh_eth_is_rz_fast_ether(struct sh_eth_private *mdp)
{
	return mdp->reg_offset == sh_eth_offset_fast_rz;
}
static void sh_eth_select_mii(struct net_device *ndev)
{
	u32 value = 0x0;
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->phy_interface) {
	case PHY_INTERFACE_MODE_GMII:
		value = 0x2;
		break;
	case PHY_INTERFACE_MODE_MII:
		value = 0x1;
		break;
	case PHY_INTERFACE_MODE_RMII:
		value = 0x0;
		break;
	default:
		netdev_warn(ndev,
			    "PHY interface mode was not set up. Set to MII.\n");
		value = 0x1;
		break;
	}

	sh_eth_write(ndev, value, RMII_MII);
}

static void sh_eth_set_duplex(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	if (mdp->duplex) /* Full */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_DM, ECMR);
	else		/* Half */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_DM, ECMR);
}

/* There is CPU dependent code */
static void sh_eth_set_rate_r8a777x(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_ELB, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_ELB, ECMR);
		break;
	default:
		break;
	}
}

/* R8A7778/9 */
static struct sh_eth_cpu_data r8a777x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

/* R8A7790/1 */
static struct sh_eth_cpu_data r8a779x_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_r8a777x,

	.register_type	= SH_ETH_REG_FAST_RCAR,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rmiimode	= 1,
	.shift_rd0	= 1,
};

static void sh_eth_set_rate_sh7724(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) & ~ECMR_RTM, ECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, sh_eth_read(ndev, ECMR) | ECMR_RTM, ECMR);
		break;
	default:
		break;
	}
}

/* SH7724 */
static struct sh_eth_cpu_data sh7724_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7724,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.ecsr_value	= ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
	.ecsipr_value	= ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
	.eesipr_value	= 0x01ff009f,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 0x00020000, /* NET_IP_ALIGN assumed to be 2 */
};

static void sh_eth_set_rate_sh7757(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0, RTRATE);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 1, RTRATE);
		break;
	default:
		break;
	}
}

/* SH7757 */
static struct sh_eth_cpu_data sh7757_data = {
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_sh7757,

	.register_type	= SH_ETH_REG_FAST_SH4,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
	.eesr_err_check	= EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.no_ade		= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
};

#define SH_GIGA_ETH_BASE 0xfee00000UL
#define GIGA_MALR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c8)
#define GIGA_MAHR(port) (SH_GIGA_ETH_BASE + 0x800 * (port) + 0x05c0)
static void sh_eth_chip_reset_giga(struct net_device *ndev)
{
	int i;
	unsigned long mahr[2], malr[2];

	/* save MAHR and MALR */
	for (i = 0; i < 2; i++) {
		malr[i] = ioread32((void *)GIGA_MALR(i));
		mahr[i] = ioread32((void *)GIGA_MAHR(i));
	}

	/* reset device */
	iowrite32(ARSTR_ARSTR, (void *)(SH_GIGA_ETH_BASE + 0x1800));
	mdelay(1);

	/* restore MAHR and MALR */
	for (i = 0; i < 2; i++) {
		iowrite32(malr[i], (void *)GIGA_MALR(i));
		iowrite32(mahr[i], (void *)GIGA_MAHR(i));
	}
}

static void sh_eth_set_rate_giga(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, 0x00000000, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, 0x00000010, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, 0x00000020, GECMR);
		break;
	default:
		break;
	}
}

/* SH7757(GETHERC) */
static struct sh_eth_cpu_data sh7757_data_giga = {
	.chip_reset	= sh_eth_chip_reset_giga,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_giga,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000072f,

	.irq_flags	= IRQF_SHARED,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
};

static void sh_eth_chip_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);
}

static void sh_eth_set_rate_gether(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	switch (mdp->speed) {
	case 10: /* 10BASE */
		sh_eth_write(ndev, GECMR_10, GECMR);
		break;
	case 100: /* 100BASE */
		sh_eth_write(ndev, GECMR_100, GECMR);
		break;
	case 1000: /* 1000BASE */
		sh_eth_write(ndev, GECMR_1000, GECMR);
		break;
	default:
		break;
	}
}

/* SH7734 */
static struct sh_eth_cpu_data sh7734_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.hw_crc		= 1,
	.select_mii	= 1,
};

/* SH7763 */
static struct sh_eth_cpu_data sh7763_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
			  EESR_ECI,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.irq_flags	= IRQF_SHARED,
};

static void sh_eth_chip_reset_r8a7740(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);

	/* reset device */
	sh_eth_tsu_write(mdp, ARSTR_ARSTR, ARSTR);
	mdelay(1);

	sh_eth_select_mii(ndev);
}

/* R8A7740 */
static struct sh_eth_cpu_data r8a7740_data = {
	.chip_reset	= sh_eth_chip_reset_r8a7740,
	.set_duplex	= sh_eth_set_duplex,
	.set_rate	= sh_eth_set_rate_gether,

	.register_type	= SH_ETH_REG_GIGABIT,

	.ecsr_value	= ECSR_ICD | ECSR_MPD,
	.ecsipr_value	= ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.bculr		= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.tsu		= 1,
	.select_mii	= 1,
	.shift_rd0	= 1,
};

/* R7S72100 */
static struct sh_eth_cpu_data r7s72100_data = {
	.chip_reset	= sh_eth_chip_reset,
	.set_duplex	= sh_eth_set_duplex,

	.register_type	= SH_ETH_REG_FAST_RZ,

	.ecsr_value	= ECSR_ICD,
	.ecsipr_value	= ECSIPR_ICDIP,
	.eesipr_value	= 0xff7f009f,

	.tx_check	= EESR_TC1 | EESR_FTC,
	.eesr_err_check	= EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
			  EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
			  EESR_TDE | EESR_ECI,
	.fdr_value	= 0x0000070f,

	.no_psr		= 1,
	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
	.rpadir		= 1,
	.rpadir_value	= 2 << 16,
	.no_trimd	= 1,
	.no_ade		= 1,
	.hw_crc		= 1,
	.tsu		= 1,
	.shift_rd0	= 1,
};

static struct sh_eth_cpu_data sh7619_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,

	.apr		= 1,
	.mpr		= 1,
	.tpauser	= 1,
	.hw_swap	= 1,
};

static struct sh_eth_cpu_data sh771x_data = {
	.register_type	= SH_ETH_REG_FAST_SH3_SH2,

	.eesipr_value	= DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
	.tsu		= 1,
};

static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
{
	if (!cd->ecsr_value)
		cd->ecsr_value = DEFAULT_ECSR_INIT;

	if (!cd->ecsipr_value)
		cd->ecsipr_value = DEFAULT_ECSIPR_INIT;

	if (!cd->fcftr_value)
		cd->fcftr_value = DEFAULT_FIFO_F_D_RFF |
				  DEFAULT_FIFO_F_D_RFD;

	if (!cd->fdr_value)
		cd->fdr_value = DEFAULT_FDR_INIT;

	if (!cd->tx_check)
		cd->tx_check = DEFAULT_TX_CHECK;

	if (!cd->eesr_err_check)
		cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
}

static int sh_eth_check_reset(struct net_device *ndev)
{
	int ret = 0;
	int cnt = 100;

	while (cnt > 0) {
		if (!(sh_eth_read(ndev, EDMR) & 0x3))
			break;
		mdelay(1);
		cnt--;
	}
	if (cnt <= 0) {
		netdev_err(ndev, "Device reset failed\n");
		ret = -ETIMEDOUT;
	}
	return ret;
}

static int sh_eth_reset(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int ret = 0;

	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp)) {
		sh_eth_write(ndev, EDSR_ENALL, EDSR);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_GETHER,
			     EDMR);

		ret = sh_eth_check_reset(ndev);
		if (ret)
			return ret;

		/* Table Init */
		sh_eth_write(ndev, 0x0, TDLAR);
		sh_eth_write(ndev, 0x0, TDFAR);
		sh_eth_write(ndev, 0x0, TDFXR);
		sh_eth_write(ndev, 0x0, TDFFR);
		sh_eth_write(ndev, 0x0, RDLAR);
		sh_eth_write(ndev, 0x0, RDFAR);
		sh_eth_write(ndev, 0x0, RDFXR);
		sh_eth_write(ndev, 0x0, RDFFR);

		/* Reset HW CRC register */
		if (mdp->cd->hw_crc)
			sh_eth_write(ndev, 0x0, CSMR);

		/* Select MII mode */
		if (mdp->cd->select_mii)
			sh_eth_select_mii(ndev);
	} else {
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) | EDMR_SRST_ETHER,
			     EDMR);
		mdelay(3);
		sh_eth_write(ndev, sh_eth_read(ndev, EDMR) & ~EDMR_SRST_ETHER,
			     EDMR);
	}

	return ret;
}

#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
}
#endif
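
/* Worked example (added in editing; assumes the SH4_SKB_RX_ALIGN = 32 and
 * SH2_SH3_SKB_RX_ALIGN = 16 values from sh_eth.h): on SH4, an skb whose
 * data pointer ends in 0x04 gets 32 - 4 = 28 bytes reserved, leaving the
 * buffer cache-line aligned for the DMA engine, while SH2/SH3 parts always
 * step the data pointer by a fixed 16 bytes.
 */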

/* CPU <-> EDMAC endian convert */
static inline __u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline __u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}

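/* Illustration (added in editing, not part of the original file): the EDMAC
 * descriptor byte order is a platform property independent of the CPU.  On a
 * little-endian CPU with EDMAC_BIG_ENDIAN configured,
 * cpu_to_edmac(mdp, 0x12345678) yields 0x78563412 in CPU terms, so the DMA
 * engine sees 0x12345678; edmac_to_cpu() applies the inverse swap when the
 * driver reads status words back out of a descriptor.
 */
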
/* Program the hardware MAC address from dev->dev_addr. */
static void update_mac_address(struct net_device *ndev)
{
	sh_eth_write(ndev,
		     (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
		     (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR);
	sh_eth_write(ndev,
		     (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR);
}

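/* Worked example (added in editing, not part of the original file): for a
 * dev_addr of 02:00:00:12:34:56 the two writes above program
 * MAHR = 0x02000012 (bytes 0-3) and MALR = 0x00003456 (bytes 4-5);
 * read_mac_address() below performs the inverse extraction when the
 * platform passes no MAC address in.
 */
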
/* Get MAC address from SuperH MAC address register
 *
 * SuperH's Ethernet device doesn't have a ROM for the MAC address.
 * This driver gets the MAC address that was set by the bootloader
 * (U-Boot or sh-ipl+g). When you want to use this device, you must
 * set the MAC address in the bootloader.
 */
static void read_mac_address(struct net_device *ndev, unsigned char *mac)
{
	if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
		memcpy(ndev->dev_addr, mac, ETH_ALEN);
	} else {
		ndev->dev_addr[0] = (sh_eth_read(ndev, MAHR) >> 24);
		ndev->dev_addr[1] = (sh_eth_read(ndev, MAHR) >> 16) & 0xFF;
		ndev->dev_addr[2] = (sh_eth_read(ndev, MAHR) >> 8) & 0xFF;
		ndev->dev_addr[3] = (sh_eth_read(ndev, MAHR) & 0xFF);
		ndev->dev_addr[4] = (sh_eth_read(ndev, MALR) >> 8) & 0xFF;
		ndev->dev_addr[5] = (sh_eth_read(ndev, MALR) & 0xFF);
	}
}

static unsigned long sh_eth_get_edtrr_trns(struct sh_eth_private *mdp)
{
	if (sh_eth_is_gether(mdp) || sh_eth_is_rz_fast_ether(mdp))
		return EDTRR_TRNS_GETHER;
	else
		return EDTRR_TRNS_ETHER;
}

struct bb_info {
	void (*set_gate)(void *addr);
	struct mdiobb_ctrl ctrl;
	void *addr;
	u32 mmd_msk; /* MMD */
	u32 mdo_msk;
	u32 mdi_msk;
	u32 mdc_msk;
};

/* PHY bit set */
static void bb_set(void *addr, u32 msk)
{
	iowrite32(ioread32(addr) | msk, addr);
}

/* PHY bit clear */
static void bb_clr(void *addr, u32 msk)
{
	iowrite32((ioread32(addr) & ~msk), addr);
}

/* PHY bit read */
static int bb_read(void *addr, u32 msk)
{
	return (ioread32(addr) & msk) != 0;
}

/* Data I/O pin control */
static void sh_mmd_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mmd_msk);
	else
		bb_clr(bitbang->addr, bitbang->mmd_msk);
}

/* Set bit data */
static void sh_set_mdio(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdo_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdo_msk);
}

/* Get bit data */
static int sh_get_mdio(struct mdiobb_ctrl *ctrl)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	return bb_read(bitbang->addr, bitbang->mdi_msk);
}

/* MDC pin control */
static void sh_mdc_ctrl(struct mdiobb_ctrl *ctrl, int bit)
{
	struct bb_info *bitbang = container_of(ctrl, struct bb_info, ctrl);

	if (bitbang->set_gate)
		bitbang->set_gate(bitbang->addr);

	if (bit)
		bb_set(bitbang->addr, bitbang->mdc_msk);
	else
		bb_clr(bitbang->addr, bitbang->mdc_msk);
}

/* mdio bus control struct */
static struct mdiobb_ops bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sh_mdc_ctrl,
	.set_mdio_dir = sh_mmd_ctrl,
	.set_mdio_data = sh_set_mdio,
	.get_mdio_data = sh_get_mdio,
};

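/* Sketch (added in editing, not part of the original file): the mdiobb core
 * turns these ops into a full MII bus.  The registration done later in this
 * driver reduces to approximately:
 *
 *	bitbang->ctrl.ops = &bb_ops;
 *	mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
 *
 * after which mdiobb's read/write helpers clock whole MII frames through
 * set_mdc/set_mdio_data above, one bit per call.
 */
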
/* free skb and descriptor buffer */
static void sh_eth_ring_free(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;

	/* Free Rx skb ringbuffer */
	if (mdp->rx_skbuff) {
		for (i = 0; i < mdp->num_rx_ring; i++) {
			if (mdp->rx_skbuff[i])
				dev_kfree_skb(mdp->rx_skbuff[i]);
		}
	}
	kfree(mdp->rx_skbuff);
	mdp->rx_skbuff = NULL;

	/* Free Tx skb ringbuffer */
	if (mdp->tx_skbuff) {
		for (i = 0; i < mdp->num_tx_ring; i++) {
			if (mdp->tx_skbuff[i])
				dev_kfree_skb(mdp->tx_skbuff[i]);
		}
	}
	kfree(mdp->tx_skbuff);
	mdp->tx_skbuff = NULL;
}

/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
	int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;

	mdp->cur_rx = 0;
	mdp->cur_tx = 0;
	mdp->dirty_rx = 0;
	mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < mdp->num_rx_ring; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
			       DMA_FROM_DEVICE);
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The size of the buffer is a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - mdp->num_rx_ring);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < mdp->num_tx_ring; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp) ||
			    sh_eth_is_rz_fast_ether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}

/* Get skb and descriptor buffer */
static int sh_eth_ring_init(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int rx_ringsize, tx_ringsize, ret = 0;

	/* +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
	 * card needs room to do 8 byte alignment, +2 so we can reserve
	 * the first 2 bytes, and +16 gets room for the status word from the
	 * card.
	 */
	mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
			  (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));
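	/* Worked example (added in editing, not part of the original file):
	 * with the default MTU of 1500, rx_buf_sz becomes
	 * ((1500 + 26 + 7) & ~7) + 2 + 16 = 1528 + 18 = 1546 bytes;
	 * an MTU <= 1492 falls back to the fixed PKT_BUF_SZ instead.
	 */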
	if (mdp->cd->rpadir)
		mdp->rx_buf_sz += NET_IP_ALIGN;

	/* Allocate RX and TX skb rings */
	mdp->rx_skbuff = kmalloc_array(mdp->num_rx_ring,
				       sizeof(*mdp->rx_skbuff), GFP_KERNEL);
	if (!mdp->rx_skbuff) {
		ret = -ENOMEM;
		return ret;
	}

	mdp->tx_skbuff = kmalloc_array(mdp->num_tx_ring,
				       sizeof(*mdp->tx_skbuff), GFP_KERNEL);
	if (!mdp->tx_skbuff) {
		ret = -ENOMEM;
		goto skb_ring_free;
	}

	/* Allocate all Rx descriptors. */
	rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
	mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->rx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}

	mdp->dirty_rx = 0;

	/* Allocate all Tx descriptors. */
	tx_ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
	mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
					  GFP_KERNEL);
	if (!mdp->tx_ring) {
		ret = -ENOMEM;
		goto desc_ring_free;
	}
	return ret;

desc_ring_free:
	/* free DMA buffer */
	dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
	/* Free Rx and Tx skb ring buffer */
	sh_eth_ring_free(ndev);
	mdp->tx_ring = NULL;
	mdp->rx_ring = NULL;

	return ret;
}

static void sh_eth_free_dma_buffer(struct sh_eth_private *mdp)
{
	int ringsize;

	if (mdp->rx_ring) {
		ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
		dma_free_coherent(NULL, ringsize, mdp->rx_ring,
				  mdp->rx_desc_dma);
		mdp->rx_ring = NULL;
	}

	if (mdp->tx_ring) {
		ringsize = sizeof(struct sh_eth_txdesc) * mdp->num_tx_ring;
		dma_free_coherent(NULL, ringsize, mdp->tx_ring,
				  mdp->tx_desc_dma);
		mdp->tx_ring = NULL;
	}
}

static int sh_eth_dev_init(struct net_device *ndev, bool start)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 val;

	/* Soft Reset */
	ret = sh_eth_reset(ndev);
	if (ret)
		return ret;

	if (mdp->cd->rmiimode)
		sh_eth_write(ndev, 0x1, RMIIMODE);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control (enable multiple-packets per rx irq) */
	sh_eth_write(ndev, RMCR_RNC, RMCR);

	sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN,
		     RFLR);
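	/* Worked example (added in editing, not part of the original file):
	 * with the default MTU of 1500 this programs RFLR with
	 * 1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (ETH_FCS_LEN) = 1522,
	 * so a maximum-size VLAN-tagged frame still passes the limit.
	 */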

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	if (start)
		sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	if (start)
		sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	if (start) {
		/* Setting the Rx mode will start the Rx process. */
		sh_eth_write(ndev, EDRRR_R, EDRRR);

		netif_start_queue(ndev);
	}

	return ret;
}

/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	int free_num = 0;
	int entry = 0;

	for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
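		/* Note (added in editing, not part of the original file):
		 * cur_tx and dirty_tx are free-running counters, so their
		 * difference is the number of Tx descriptors still
		 * outstanding; each counter is reduced modulo the ring size
		 * only when used as an index, as below.
		 */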
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001371 entry = mdp->dirty_tx % mdp->num_tx_ring;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001372 txdesc = &mdp->tx_ring[entry];
Yoshinori Sato71557a32008-08-06 19:49:00 -04001373 if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001374 break;
1375 /* Free the original skb. */
1376 if (mdp->tx_skbuff[entry]) {
Yoshihiro Shimoda31fcb992011-06-30 22:52:13 +00001377 dma_unmap_single(&ndev->dev, txdesc->addr,
1378 txdesc->buffer_length, DMA_TO_DEVICE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001379 dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
1380 mdp->tx_skbuff[entry] = NULL;
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001381 free_num++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001382 }
Yoshinori Sato71557a32008-08-06 19:49:00 -04001383 txdesc->status = cpu_to_edmac(mdp, TD_TFP);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001384 if (entry >= mdp->num_tx_ring - 1)
Yoshinori Sato71557a32008-08-06 19:49:00 -04001385 txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001386
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001387 ndev->stats.tx_packets++;
1388 ndev->stats.tx_bytes += txdesc->buffer_length;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001389 }
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001390 return free_num;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001391}
1392
1393/* Packet receive function */
Sergei Shtylyov37191092013-06-19 23:30:23 +04001394static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001395{
1396 struct sh_eth_private *mdp = netdev_priv(ndev);
1397 struct sh_eth_rxdesc *rxdesc;
1398
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001399 int entry = mdp->cur_rx % mdp->num_rx_ring;
1400 int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001401 struct sk_buff *skb;
1402 u16 pkt_len = 0;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001403 u32 desc_status;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001404
1405 rxdesc = &mdp->rx_ring[entry];
Yoshinori Sato71557a32008-08-06 19:49:00 -04001406 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
1407 desc_status = edmac_to_cpu(mdp, rxdesc->status);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001408 pkt_len = rxdesc->frame_length;
1409
1410 if (--boguscnt < 0)
1411 break;
1412
Yoshihiro Shimoda4f809ce2014-06-10 09:40:14 +09001413 if (*quota <= 0)
Sergei Shtylyov37191092013-06-19 23:30:23 +04001414 break;
Yoshihiro Shimoda4f809ce2014-06-10 09:40:14 +09001415
Sergei Shtylyov37191092013-06-19 23:30:23 +04001416 (*quota)--;
1417
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001418 if (!(desc_status & RDFEND))
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001419 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001420
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001421		/* On almost all GETHER/ETHER controllers, the Receive Frame
Yoshihiro Shimodadd019892013-06-13 10:15:45 +09001422		 * State (RFS) bits in Receive Descriptor 0 occupy bits 9 to 0.
Simon Hormandb893472014-01-17 09:22:28 +09001423		 * However, on the R8A7740, R8A779x, and R7S72100 the RFS bits
1424		 * occupy bits 25 to 16, so the driver must shift the status
1425		 * right by 16.
Yoshihiro Shimodadd019892013-06-13 10:15:45 +09001426		 */
Sergei Shtylyovac8025a2013-06-13 22:12:45 +04001427 if (mdp->cd->shift_rd0)
1428 desc_status >>= 16;
Yoshihiro Shimodadd019892013-06-13 10:15:45 +09001429
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001430 if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
1431 RD_RFS5 | RD_RFS6 | RD_RFS10)) {
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001432 ndev->stats.rx_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001433 if (desc_status & RD_RFS1)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001434 ndev->stats.rx_crc_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001435 if (desc_status & RD_RFS2)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001436 ndev->stats.rx_frame_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001437 if (desc_status & RD_RFS3)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001438 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001439 if (desc_status & RD_RFS4)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001440 ndev->stats.rx_length_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001441 if (desc_status & RD_RFS6)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001442 ndev->stats.rx_missed_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001443 if (desc_status & RD_RFS10)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001444 ndev->stats.rx_over_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001445 } else {
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001446 if (!mdp->cd->hw_swap)
1447 sh_eth_soft_swap(
1448 phys_to_virt(ALIGN(rxdesc->addr, 4)),
1449 pkt_len + 2);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001450 skb = mdp->rx_skbuff[entry];
1451 mdp->rx_skbuff[entry] = NULL;
Magnus Damm503914c2009-12-15 21:16:55 -08001452 if (mdp->cd->rpadir)
1453 skb_reserve(skb, NET_IP_ALIGN);
Kouei Abe7db8e0c2013-08-30 12:41:07 +09001454 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1455 mdp->rx_buf_sz,
1456 DMA_FROM_DEVICE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001457 skb_put(skb, pkt_len);
1458 skb->protocol = eth_type_trans(skb, ndev);
Sergei Shtylyova8e9fd02013-09-03 03:03:10 +04001459 netif_receive_skb(skb);
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001460 ndev->stats.rx_packets++;
1461 ndev->stats.rx_bytes += pkt_len;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001462 }
Yoshinori Sato71557a32008-08-06 19:49:00 -04001463 rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001464 entry = (++mdp->cur_rx) % mdp->num_rx_ring;
Yoshihiro Shimoda862df492009-05-24 23:53:40 +00001465 rxdesc = &mdp->rx_ring[entry];
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001466 }
1467
1468 /* Refill the Rx ring buffers. */
1469 for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001470 entry = mdp->dirty_rx % mdp->num_rx_ring;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001471 rxdesc = &mdp->rx_ring[entry];
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001472		/* The buffer size is rounded up to a 16-byte boundary. */
Yoshihiro Shimoda0029d642009-05-24 23:53:20 +00001473 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001474
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001475 if (mdp->rx_skbuff[entry] == NULL) {
Pradeep A. Dalvidae2e9f2012-02-06 11:16:13 +00001476 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001477 mdp->rx_skbuff[entry] = skb;
1478 if (skb == NULL)
1479 break; /* Better luck next round. */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001480 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001481 DMA_FROM_DEVICE);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001482 sh_eth_set_receive_align(skb);
1483
Eric Dumazetbc8acf22010-09-02 13:07:41 -07001484 skb_checksum_none_assert(skb);
Yoshihiro Shimoda0029d642009-05-24 23:53:20 +00001485 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001486 }
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001487 if (entry >= mdp->num_rx_ring - 1)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001488 rxdesc->status |=
Yoshinori Sato71557a32008-08-06 19:49:00 -04001489 cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001490 else
1491 rxdesc->status |=
Yoshinori Sato71557a32008-08-06 19:49:00 -04001492 cpu_to_edmac(mdp, RD_RACT | RD_RFP);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001493 }
1494
1495 /* Restart Rx engine if stopped. */
1496 /* If we don't need to check status, don't. -KDU */
Yoshihiro Shimoda79fba9f2012-05-28 23:07:55 +00001497 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
Yoshihiro Shimodaa18e08b2012-06-20 15:26:34 +00001498		/* fix the ring indices for the next reception if RDE is set */
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001499 if (intr_status & EESR_RDE) {
1500 u32 count = (sh_eth_read(ndev, RDFAR) -
1501 sh_eth_read(ndev, RDLAR)) >> 4;
1502
1503 mdp->cur_rx = count;
1504 mdp->dirty_rx = count;
1505 }
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001506 sh_eth_write(ndev, EDRRR_R, EDRRR);
Yoshihiro Shimoda79fba9f2012-05-28 23:07:55 +00001507 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001508
Yoshihiro Shimoda4f809ce2014-06-10 09:40:14 +09001509 return *quota <= 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001510}
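/* Worked example (illustrative, not part of the original driver): each
 * receive descriptor is 16 bytes, so on a Receive Descriptor Empty stop
 * the index where the controller halted can be recovered as
 *
 *	count = (RDFAR - RDLAR) >> 4;
 *
 * e.g. RDLAR = 0x1000 and RDFAR = 0x10a0 give count = 10; cur_rx and
 * dirty_rx are resynchronized to it before EDRRR_R restarts reception.
 */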
1511
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001512static void sh_eth_rcv_snd_disable(struct net_device *ndev)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001513{
1514 /* disable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001515 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) &
1516 ~(ECMR_RE | ECMR_TE), ECMR);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001517}
1518
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001519static void sh_eth_rcv_snd_enable(struct net_device *ndev)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001520{
1521 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001522 sh_eth_write(ndev, sh_eth_read(ndev, ECMR) |
1523 (ECMR_RE | ECMR_TE), ECMR);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001524}
1525
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001526/* error control function */
1527static void sh_eth_error(struct net_device *ndev, int intr_status)
1528{
1529 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001530 u32 felic_stat;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001531 u32 link_stat;
1532 u32 mask;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001533
1534 if (intr_status & EESR_ECI) {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001535 felic_stat = sh_eth_read(ndev, ECSR);
1536 sh_eth_write(ndev, felic_stat, ECSR); /* clear int */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001537 if (felic_stat & ECSR_ICD)
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001538 ndev->stats.tx_carrier_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001539 if (felic_stat & ECSR_LCHNG) {
1540 /* Link Changed */
Yoshihiro Shimoda49235762009-08-27 23:25:03 +00001541 if (mdp->cd->no_psr || mdp->no_ether_link) {
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001542 goto ignore_link;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001543 } else {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001544 link_stat = (sh_eth_read(ndev, PSR));
Yoshihiro Shimoda49235762009-08-27 23:25:03 +00001545 if (mdp->ether_link_active_low)
1546 link_stat = ~link_stat;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001547 }
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001548 if (!(link_stat & PHY_ST_LINK)) {
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001549 sh_eth_rcv_snd_disable(ndev);
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001550 } else {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001551 /* Link Up */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001552 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) &
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001553 ~DMAC_M_ECI, EESIPR);
1554 /* clear int */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001555 sh_eth_write(ndev, sh_eth_read(ndev, ECSR),
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001556 ECSR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001557 sh_eth_write(ndev, sh_eth_read(ndev, EESIPR) |
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001558 DMAC_M_ECI, EESIPR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001559 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001560 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001561 }
1562 }
1563 }
1564
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001565ignore_link:
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001566 if (intr_status & EESR_TWB) {
Sergei Shtylyov4eb313a2013-06-21 01:13:42 +04001567		/* Unused write-back interrupt */
1568 if (intr_status & EESR_TABT) { /* Transmit Abort int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001569 ndev->stats.tx_aborted_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001570 netif_err(mdp, tx_err, ndev, "Transmit Abort\n");
Sergei Shtylyov4eb313a2013-06-21 01:13:42 +04001571 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001572 }
1573
1574 if (intr_status & EESR_RABT) {
1575 /* Receive Abort int */
1576 if (intr_status & EESR_RFRMER) {
1577 /* Receive Frame Overflow int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001578 ndev->stats.rx_frame_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001579 netif_err(mdp, rx_err, ndev, "Receive Abort\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001580 }
1581 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001582
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001583 if (intr_status & EESR_TDE) {
1584 /* Transmit Descriptor Empty int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001585 ndev->stats.tx_fifo_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001586 netif_err(mdp, tx_err, ndev, "Transmit Descriptor Empty\n");
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001587 }
1588
1589 if (intr_status & EESR_TFE) {
1590 /* FIFO under flow */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001591 ndev->stats.tx_fifo_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001592 netif_err(mdp, tx_err, ndev, "Transmit FIFO Under flow\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001593 }
1594
1595 if (intr_status & EESR_RDE) {
1596 /* Receive Descriptor Empty int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001597 ndev->stats.rx_over_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001598 netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001599 }
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001600
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001601 if (intr_status & EESR_RFE) {
1602 /* Receive FIFO Overflow int */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001603 ndev->stats.rx_fifo_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001604 netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001605 }
1606
1607 if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
1608 /* Address Error */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00001609 ndev->stats.tx_fifo_errors++;
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03001610 netif_err(mdp, tx_err, ndev, "Address Error\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001611 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001612
1613 mask = EESR_TWB | EESR_TABT | EESR_ADE | EESR_TDE | EESR_TFE;
1614 if (mdp->cd->no_ade)
1615 mask &= ~EESR_ADE;
1616 if (intr_status & mask) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001617 /* Tx error */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001618 u32 edtrr = sh_eth_read(ndev, EDTRR);
Sergei Shtylyov090d5602014-01-11 02:41:49 +03001619
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001620 /* dmesg */
Sergei Shtylyovda246852014-03-15 03:29:14 +03001621 netdev_err(ndev, "TX error. status=%8.8x cur_tx=%8.8x dirty_tx=%8.8x state=%8.8x EDTRR=%8.8x.\n",
1622 intr_status, mdp->cur_tx, mdp->dirty_tx,
1623 (u32)ndev->state, edtrr);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001624 /* dirty buffer free */
1625 sh_eth_txfree(ndev);
1626
1627 /* SH7712 BUG */
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00001628 if (edtrr ^ sh_eth_get_edtrr_trns(mdp)) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001629 /* tx dma start */
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00001630 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001631 }
1632 /* wakeup */
1633 netif_wake_queue(ndev);
1634 }
1635}
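/* Illustrative note (an assumption drawn from sh_eth_get_edtrr_trns(),
 * not stated in the original source): EDTRR holds the transmit-start
 * value while DMA is running and reads back as 0 once it stops, so
 * "edtrr ^ sh_eth_get_edtrr_trns(mdp)" is non-zero exactly when the
 * transmitter has stalled and needs to be kicked again after the dirty
 * descriptors have been reclaimed.
 */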
1636
1637static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
1638{
1639 struct net_device *ndev = netdev;
1640 struct sh_eth_private *mdp = netdev_priv(ndev);
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001641 struct sh_eth_cpu_data *cd = mdp->cd;
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001642 irqreturn_t ret = IRQ_NONE;
Sergei Shtylyov37191092013-06-19 23:30:23 +04001643 unsigned long intr_status, intr_enable;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001644
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001645 spin_lock(&mdp->lock);
1646
Sergei Shtylyov3893b273452013-03-31 09:54:20 +00001647 /* Get interrupt status */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001648 intr_status = sh_eth_read(ndev, EESR);
Sergei Shtylyov3893b273452013-03-31 09:54:20 +00001649 /* Mask it with the interrupt mask, forcing ECI interrupt to be always
1650 * enabled since it's the one that comes thru regardless of the mask,
1651 * and we need to fully handle it in sh_eth_error() in order to quench
1652 * it as it doesn't get cleared by just writing 1 to the ECI bit...
1653 */
Sergei Shtylyov37191092013-06-19 23:30:23 +04001654 intr_enable = sh_eth_read(ndev, EESIPR);
1655 intr_status &= intr_enable | DMAC_M_ECI;
1656 if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001657 ret = IRQ_HANDLED;
Sergei Shtylyov37191092013-06-19 23:30:23 +04001658 else
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001659 goto other_irq;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001660
Sergei Shtylyov37191092013-06-19 23:30:23 +04001661 if (intr_status & EESR_RX_CHECK) {
1662 if (napi_schedule_prep(&mdp->napi)) {
1663 /* Mask Rx interrupts */
1664 sh_eth_write(ndev, intr_enable & ~EESR_RX_CHECK,
1665 EESIPR);
1666 __napi_schedule(&mdp->napi);
1667 } else {
Sergei Shtylyovda246852014-03-15 03:29:14 +03001668 netdev_warn(ndev,
1669 "ignoring interrupt, status 0x%08lx, mask 0x%08lx.\n",
1670 intr_status, intr_enable);
Sergei Shtylyov37191092013-06-19 23:30:23 +04001671 }
1672 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001673
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09001674 /* Tx Check */
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001675 if (intr_status & cd->tx_check) {
Sergei Shtylyov37191092013-06-19 23:30:23 +04001676 /* Clear Tx interrupts */
1677 sh_eth_write(ndev, intr_status & cd->tx_check, EESR);
1678
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001679 sh_eth_txfree(ndev);
1680 netif_wake_queue(ndev);
1681 }
1682
Sergei Shtylyov37191092013-06-19 23:30:23 +04001683 if (intr_status & cd->eesr_err_check) {
1684 /* Clear error interrupts */
1685 sh_eth_write(ndev, intr_status & cd->eesr_err_check, EESR);
1686
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001687 sh_eth_error(ndev, intr_status);
Sergei Shtylyov37191092013-06-19 23:30:23 +04001688 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001689
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001690other_irq:
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001691 spin_unlock(&mdp->lock);
1692
Nobuhiro Iwamatsu0e0fde32009-03-16 19:50:57 +00001693 return ret;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001694}
1695
Sergei Shtylyov37191092013-06-19 23:30:23 +04001696static int sh_eth_poll(struct napi_struct *napi, int budget)
1697{
1698 struct sh_eth_private *mdp = container_of(napi, struct sh_eth_private,
1699 napi);
1700 struct net_device *ndev = napi->dev;
1701 int quota = budget;
1702 unsigned long intr_status;
1703
1704 for (;;) {
1705 intr_status = sh_eth_read(ndev, EESR);
1706 if (!(intr_status & EESR_RX_CHECK))
1707 break;
1708 /* Clear Rx interrupts */
1709 sh_eth_write(ndev, intr_status & EESR_RX_CHECK, EESR);
1710
1711 if (sh_eth_rx(ndev, intr_status, &quota))
1712 goto out;
1713 }
1714
1715 napi_complete(napi);
1716
1717 /* Reenable Rx interrupts */
1718 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1719out:
1720 return budget - quota;
1721}
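/* Worked example (illustrative, not part of the original driver): the
 * standard NAPI contract is kept here. quota starts at budget and
 * sh_eth_rx() decrements it once per frame, so budget - quota is the
 * number of packets processed. With budget = 64 and 10 frames queued,
 * sh_eth_rx() returns 0 (quota not exhausted), napi_complete() runs,
 * Rx interrupts are re-enabled and sh_eth_poll() returns 10.
 */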
1722
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001723/* PHY state control function */
1724static void sh_eth_adjust_link(struct net_device *ndev)
1725{
1726 struct sh_eth_private *mdp = netdev_priv(ndev);
1727 struct phy_device *phydev = mdp->phydev;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001728 int new_state = 0;
1729
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001730 if (phydev->link) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001731 if (phydev->duplex != mdp->duplex) {
1732 new_state = 1;
1733 mdp->duplex = phydev->duplex;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001734 if (mdp->cd->set_duplex)
1735 mdp->cd->set_duplex(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001736 }
1737
1738 if (phydev->speed != mdp->speed) {
1739 new_state = 1;
1740 mdp->speed = phydev->speed;
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001741 if (mdp->cd->set_rate)
1742 mdp->cd->set_rate(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001743 }
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001744 if (!mdp->link) {
Yoshihiro Shimoda91a56152011-07-05 20:33:51 +00001745 sh_eth_write(ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001746 sh_eth_read(ndev, ECMR) & ~ECMR_TXF,
1747 ECMR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001748 new_state = 1;
1749 mdp->link = phydev->link;
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001750 if (mdp->cd->no_psr || mdp->no_ether_link)
1751 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001752 }
1753 } else if (mdp->link) {
1754 new_state = 1;
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001755 mdp->link = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001756 mdp->speed = 0;
1757 mdp->duplex = -1;
Sergei Shtylyov1e1b8122013-03-31 09:50:07 +00001758 if (mdp->cd->no_psr || mdp->no_ether_link)
1759 sh_eth_rcv_snd_disable(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001760 }
1761
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001762 if (new_state && netif_msg_link(mdp))
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001763 phy_print_status(phydev);
1764}
1765
1766/* PHY init function */
1767static int sh_eth_phy_init(struct net_device *ndev)
1768{
Ben Dooks702eca02014-03-12 17:47:40 +00001769 struct device_node *np = ndev->dev.parent->of_node;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001770 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001771 struct phy_device *phydev = NULL;
1772
Sergei Shtylyov3340d2a2013-03-31 10:11:04 +00001773 mdp->link = 0;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001774 mdp->speed = 0;
1775 mdp->duplex = -1;
1776
1777 /* Try connect to PHY */
Ben Dooks702eca02014-03-12 17:47:40 +00001778 if (np) {
1779 struct device_node *pn;
1780
1781 pn = of_parse_phandle(np, "phy-handle", 0);
1782 phydev = of_phy_connect(ndev, pn,
1783 sh_eth_adjust_link, 0,
1784 mdp->phy_interface);
1785
1786 if (!phydev)
1787 phydev = ERR_PTR(-ENOENT);
1788 } else {
1789 char phy_id[MII_BUS_ID_SIZE + 3];
1790
1791 snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
1792 mdp->mii_bus->id, mdp->phy_id);
1793
1794 phydev = phy_connect(ndev, phy_id, sh_eth_adjust_link,
1795 mdp->phy_interface);
1796 }
1797
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001798 if (IS_ERR(phydev)) {
Sergei Shtylyovda246852014-03-15 03:29:14 +03001799 netdev_err(ndev, "failed to connect PHY\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001800 return PTR_ERR(phydev);
1801 }
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00001802
Sergei Shtylyovda246852014-03-15 03:29:14 +03001803 netdev_info(ndev, "attached PHY %d (IRQ %d) to driver %s\n",
1804 phydev->addr, phydev->irq, phydev->drv->name);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001805
1806 mdp->phydev = phydev;
1807
1808 return 0;
1809}
1810
1811/* PHY control start function */
1812static int sh_eth_phy_start(struct net_device *ndev)
1813{
1814 struct sh_eth_private *mdp = netdev_priv(ndev);
1815 int ret;
1816
1817 ret = sh_eth_phy_init(ndev);
1818 if (ret)
1819 return ret;
1820
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07001821 phy_start(mdp->phydev);
1822
1823 return 0;
1824}
1825
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001826static int sh_eth_get_settings(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001827 struct ethtool_cmd *ecmd)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001828{
1829 struct sh_eth_private *mdp = netdev_priv(ndev);
1830 unsigned long flags;
1831 int ret;
1832
1833 spin_lock_irqsave(&mdp->lock, flags);
1834 ret = phy_ethtool_gset(mdp->phydev, ecmd);
1835 spin_unlock_irqrestore(&mdp->lock, flags);
1836
1837 return ret;
1838}
1839
1840static int sh_eth_set_settings(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001841 struct ethtool_cmd *ecmd)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001842{
1843 struct sh_eth_private *mdp = netdev_priv(ndev);
1844 unsigned long flags;
1845 int ret;
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001846
1847 spin_lock_irqsave(&mdp->lock, flags);
1848
1849 /* disable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001850 sh_eth_rcv_snd_disable(ndev);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001851
1852 ret = phy_ethtool_sset(mdp->phydev, ecmd);
1853 if (ret)
1854 goto error_exit;
1855
1856 if (ecmd->duplex == DUPLEX_FULL)
1857 mdp->duplex = 1;
1858 else
1859 mdp->duplex = 0;
1860
1861 if (mdp->cd->set_duplex)
1862 mdp->cd->set_duplex(ndev);
1863
1864error_exit:
1865 mdelay(1);
1866
1867 /* enable tx and rx */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00001868 sh_eth_rcv_snd_enable(ndev);
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001869
1870 spin_unlock_irqrestore(&mdp->lock, flags);
1871
1872 return ret;
1873}
1874
1875static int sh_eth_nway_reset(struct net_device *ndev)
1876{
1877 struct sh_eth_private *mdp = netdev_priv(ndev);
1878 unsigned long flags;
1879 int ret;
1880
1881 spin_lock_irqsave(&mdp->lock, flags);
1882 ret = phy_start_aneg(mdp->phydev);
1883 spin_unlock_irqrestore(&mdp->lock, flags);
1884
1885 return ret;
1886}
1887
1888static u32 sh_eth_get_msglevel(struct net_device *ndev)
1889{
1890 struct sh_eth_private *mdp = netdev_priv(ndev);
1891 return mdp->msg_enable;
1892}
1893
1894static void sh_eth_set_msglevel(struct net_device *ndev, u32 value)
1895{
1896 struct sh_eth_private *mdp = netdev_priv(ndev);
1897 mdp->msg_enable = value;
1898}
1899
1900static const char sh_eth_gstrings_stats[][ETH_GSTRING_LEN] = {
1901 "rx_current", "tx_current",
1902 "rx_dirty", "tx_dirty",
1903};
1904#define SH_ETH_STATS_LEN ARRAY_SIZE(sh_eth_gstrings_stats)
1905
1906static int sh_eth_get_sset_count(struct net_device *netdev, int sset)
1907{
1908 switch (sset) {
1909 case ETH_SS_STATS:
1910 return SH_ETH_STATS_LEN;
1911 default:
1912 return -EOPNOTSUPP;
1913 }
1914}
1915
1916static void sh_eth_get_ethtool_stats(struct net_device *ndev,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001917 struct ethtool_stats *stats, u64 *data)
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001918{
1919 struct sh_eth_private *mdp = netdev_priv(ndev);
1920 int i = 0;
1921
1922 /* device-specific stats */
1923 data[i++] = mdp->cur_rx;
1924 data[i++] = mdp->cur_tx;
1925 data[i++] = mdp->dirty_rx;
1926 data[i++] = mdp->dirty_tx;
1927}
1928
1929static void sh_eth_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
1930{
1931 switch (stringset) {
1932 case ETH_SS_STATS:
1933 memcpy(data, *sh_eth_gstrings_stats,
Sergei Shtylyov128296f2014-01-03 15:52:22 +03001934 sizeof(sh_eth_gstrings_stats));
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00001935 break;
1936 }
1937}
1938
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001939static void sh_eth_get_ringparam(struct net_device *ndev,
1940 struct ethtool_ringparam *ring)
1941{
1942 struct sh_eth_private *mdp = netdev_priv(ndev);
1943
1944 ring->rx_max_pending = RX_RING_MAX;
1945 ring->tx_max_pending = TX_RING_MAX;
1946 ring->rx_pending = mdp->num_rx_ring;
1947 ring->tx_pending = mdp->num_tx_ring;
1948}
1949
1950static int sh_eth_set_ringparam(struct net_device *ndev,
1951 struct ethtool_ringparam *ring)
1952{
1953 struct sh_eth_private *mdp = netdev_priv(ndev);
1954 int ret;
1955
1956 if (ring->tx_pending > TX_RING_MAX ||
1957 ring->rx_pending > RX_RING_MAX ||
1958 ring->tx_pending < TX_RING_MIN ||
1959 ring->rx_pending < RX_RING_MIN)
1960 return -EINVAL;
1961 if (ring->rx_mini_pending || ring->rx_jumbo_pending)
1962 return -EINVAL;
1963
1964 if (netif_running(ndev)) {
1965 netif_tx_disable(ndev);
1966 /* Disable interrupts by clearing the interrupt mask. */
1967 sh_eth_write(ndev, 0x0000, EESIPR);
1968 /* Stop the chip's Tx and Rx processes. */
1969 sh_eth_write(ndev, 0, EDTRR);
1970 sh_eth_write(ndev, 0, EDRRR);
1971 synchronize_irq(ndev->irq);
1972 }
1973
1974 /* Free all the skbuffs in the Rx queue. */
1975 sh_eth_ring_free(ndev);
1976 /* Free DMA buffer */
1977 sh_eth_free_dma_buffer(mdp);
1978
1979 /* Set new parameters */
1980 mdp->num_rx_ring = ring->rx_pending;
1981 mdp->num_tx_ring = ring->tx_pending;
1982
1983 ret = sh_eth_ring_init(ndev);
1984 if (ret < 0) {
Sergei Shtylyovda246852014-03-15 03:29:14 +03001985 netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001986 return ret;
1987 }
1988 ret = sh_eth_dev_init(ndev, false);
1989 if (ret < 0) {
Sergei Shtylyovda246852014-03-15 03:29:14 +03001990 netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00001991 return ret;
1992 }
1993
1994 if (netif_running(ndev)) {
1995 sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
1996 /* Setting the Rx mode will start the Rx process. */
1997 sh_eth_write(ndev, EDRRR_R, EDRRR);
1998 netif_wake_queue(ndev);
1999 }
2000
2001 return 0;
2002}
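/* Illustrative summary (not part of the original driver): resizing the
 * rings on a live interface is a stop-the-world sequence - quiesce the
 * Tx queue, mask every interrupt through EESIPR, halt the DMA engines
 * via EDTRR/EDRRR, flush in-flight handlers with synchronize_irq(), and
 * only then free and reallocate the descriptor memory before EDRRR_R
 * restarts reception.
 */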
2003
stephen hemminger9b07be42012-01-04 12:59:49 +00002004static const struct ethtool_ops sh_eth_ethtool_ops = {
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00002005 .get_settings = sh_eth_get_settings,
2006 .set_settings = sh_eth_set_settings,
stephen hemminger9b07be42012-01-04 12:59:49 +00002007 .nway_reset = sh_eth_nway_reset,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00002008 .get_msglevel = sh_eth_get_msglevel,
2009 .set_msglevel = sh_eth_set_msglevel,
stephen hemminger9b07be42012-01-04 12:59:49 +00002010 .get_link = ethtool_op_get_link,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00002011 .get_strings = sh_eth_get_strings,
2012 .get_ethtool_stats = sh_eth_get_ethtool_stats,
2013 .get_sset_count = sh_eth_get_sset_count,
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002014 .get_ringparam = sh_eth_get_ringparam,
2015 .set_ringparam = sh_eth_set_ringparam,
Nobuhiro Iwamatsudc19e4e2011-02-15 21:17:32 +00002016};
2017
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002018/* network device open function */
2019static int sh_eth_open(struct net_device *ndev)
2020{
2021 int ret = 0;
2022 struct sh_eth_private *mdp = netdev_priv(ndev);
2023
Magnus Dammbcd51492009-10-09 00:20:04 +00002024 pm_runtime_get_sync(&mdp->pdev->dev);
2025
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04002026 napi_enable(&mdp->napi);
2027
Joe Perchesa0607fd2009-11-18 23:29:17 -08002028 ret = request_irq(ndev->irq, sh_eth_interrupt,
Nobuhiro Iwamatsu5b3dfd12013-06-06 09:49:30 +00002029 mdp->cd->irq_flags, ndev->name, ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002030 if (ret) {
Sergei Shtylyovda246852014-03-15 03:29:14 +03002031 netdev_err(ndev, "Can not assign IRQ number\n");
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04002032 goto out_napi_off;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002033 }
2034
2035 /* Descriptor set */
2036 ret = sh_eth_ring_init(ndev);
2037 if (ret)
2038 goto out_free_irq;
2039
2040 /* device init */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002041 ret = sh_eth_dev_init(ndev, true);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002042 if (ret)
2043 goto out_free_irq;
2044
2045	/* PHY control start */
2046 ret = sh_eth_phy_start(ndev);
2047 if (ret)
2048 goto out_free_irq;
2049
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002050 return ret;
2051
2052out_free_irq:
2053 free_irq(ndev->irq, ndev);
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04002054out_napi_off:
2055 napi_disable(&mdp->napi);
Magnus Dammbcd51492009-10-09 00:20:04 +00002056 pm_runtime_put_sync(&mdp->pdev->dev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002057 return ret;
2058}
2059
2060/* Timeout function */
2061static void sh_eth_tx_timeout(struct net_device *ndev)
2062{
2063 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002064 struct sh_eth_rxdesc *rxdesc;
2065 int i;
2066
2067 netif_stop_queue(ndev);
2068
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03002069 netif_err(mdp, timer, ndev,
2070 "transmit timed out, status %8.8x, resetting...\n",
2071 (int)sh_eth_read(ndev, EESR));
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002072
2073 /* tx_errors count up */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002074 ndev->stats.tx_errors++;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002075
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002076 /* Free all the skbuffs in the Rx queue. */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002077 for (i = 0; i < mdp->num_rx_ring; i++) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002078 rxdesc = &mdp->rx_ring[i];
2079 rxdesc->status = 0;
2080 rxdesc->addr = 0xBADF00D0;
2081 if (mdp->rx_skbuff[i])
2082 dev_kfree_skb(mdp->rx_skbuff[i]);
2083 mdp->rx_skbuff[i] = NULL;
2084 }
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002085 for (i = 0; i < mdp->num_tx_ring; i++) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002086 if (mdp->tx_skbuff[i])
2087 dev_kfree_skb(mdp->tx_skbuff[i]);
2088 mdp->tx_skbuff[i] = NULL;
2089 }
2090
2091 /* device init */
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002092 sh_eth_dev_init(ndev, true);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002093}
2094
2095/* Packet transmit function */
2096static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2097{
2098 struct sh_eth_private *mdp = netdev_priv(ndev);
2099 struct sh_eth_txdesc *txdesc;
2100 u32 entry;
Nobuhiro Iwamatsufb5e2f92008-11-17 20:29:58 +00002101 unsigned long flags;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002102
2103 spin_lock_irqsave(&mdp->lock, flags);
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002104 if ((mdp->cur_tx - mdp->dirty_tx) >= (mdp->num_tx_ring - 4)) {
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002105 if (!sh_eth_txfree(ndev)) {
Sergei Shtylyov8d5009f2014-03-15 03:30:59 +03002106 netif_warn(mdp, tx_queued, ndev, "TxFD exhausted.\n");
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002107 netif_stop_queue(ndev);
2108 spin_unlock_irqrestore(&mdp->lock, flags);
Patrick McHardy5b548142009-06-12 06:22:29 +00002109 return NETDEV_TX_BUSY;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002110 }
2111 }
2112 spin_unlock_irqrestore(&mdp->lock, flags);
2113
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002114 entry = mdp->cur_tx % mdp->num_tx_ring;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002115 mdp->tx_skbuff[entry] = skb;
2116 txdesc = &mdp->tx_ring[entry];
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002117 /* soft swap. */
Yoshihiro Shimoda380af9e2009-05-24 23:54:21 +00002118 if (!mdp->cd->hw_swap)
2119 sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
2120 skb->len + 2);
Yoshihiro Shimoda31fcb992011-06-30 22:52:13 +00002121 txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
2122 DMA_TO_DEVICE);
Sergei Shtylyov730c8c62014-02-14 03:05:42 +03002123 if (skb->len < ETH_ZLEN)
2124 txdesc->buffer_length = ETH_ZLEN;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002125 else
2126 txdesc->buffer_length = skb->len;
2127
Yoshihiro Shimoda525b8072012-06-26 20:00:03 +00002128 if (entry >= mdp->num_tx_ring - 1)
Yoshinori Sato71557a32008-08-06 19:49:00 -04002129 txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002130 else
Yoshinori Sato71557a32008-08-06 19:49:00 -04002131 txdesc->status |= cpu_to_edmac(mdp, TD_TACT);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002132
2133 mdp->cur_tx++;
2134
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002135 if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
2136 sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);
Nobuhiro Iwamatsub0ca2a22008-06-30 11:08:17 +09002137
Patrick McHardy6ed10652009-06-23 06:03:08 +00002138 return NETDEV_TX_OK;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002139}
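/* Worked example (illustrative, not part of the original driver): two
 * details above are easy to miss. The queue is stopped while 4 spare
 * descriptors still remain (num_tx_ring - 4), not when the ring is
 * completely full, and short frames are padded by reporting ETH_ZLEN
 * (60 octets) as buffer_length, so a 42-octet ARP request goes out as a
 * minimum-size Ethernet frame.
 */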
2140
2141/* device close function */
2142static int sh_eth_close(struct net_device *ndev)
2143{
2144 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002145
2146 netif_stop_queue(ndev);
2147
2148 /* Disable interrupts by clearing the interrupt mask. */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002149 sh_eth_write(ndev, 0x0000, EESIPR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002150
2151 /* Stop the chip's Tx and Rx processes. */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002152 sh_eth_write(ndev, 0, EDTRR);
2153 sh_eth_write(ndev, 0, EDRRR);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002154
2155 /* PHY Disconnect */
2156 if (mdp->phydev) {
2157 phy_stop(mdp->phydev);
2158 phy_disconnect(mdp->phydev);
2159 }
2160
2161 free_irq(ndev->irq, ndev);
2162
Sergei Shtylyovd2779e92013-09-04 02:41:27 +04002163 napi_disable(&mdp->napi);
2164
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002165 /* Free all the skbuffs in the Rx queue. */
2166 sh_eth_ring_free(ndev);
2167
2168 /* free DMA buffer */
Yoshihiro Shimoda91c77552012-06-26 20:00:01 +00002169 sh_eth_free_dma_buffer(mdp);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002170
Magnus Dammbcd51492009-10-09 00:20:04 +00002171 pm_runtime_put_sync(&mdp->pdev->dev);
2172
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002173 return 0;
2174}
2175
2176static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2177{
2178 struct sh_eth_private *mdp = netdev_priv(ndev);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002179
Simon Hormandb893472014-01-17 09:22:28 +09002180 if (sh_eth_is_rz_fast_ether(mdp))
2181 return &ndev->stats;
2182
Magnus Dammbcd51492009-10-09 00:20:04 +00002183 pm_runtime_get_sync(&mdp->pdev->dev);
2184
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002185 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002186 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002187 ndev->stats.collisions += sh_eth_read(ndev, CDCR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002188 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002189 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002190 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002191 if (sh_eth_is_gether(mdp)) {
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002192 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002193 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002194 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002195 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
2196 } else {
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002197 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002198 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
2199 }
Magnus Dammbcd51492009-10-09 00:20:04 +00002200 pm_runtime_put_sync(&mdp->pdev->dev);
2201
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002202 return &ndev->stats;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002203}
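/* Illustrative note (not part of the original driver): TROCR, CDCR,
 * LCCR and the CERCR/CEECR/CNDCR counters are handled in an
 * accumulate-and-clear fashion - the current hardware value is added to
 * ndev->stats and 0 is written back, so each call only picks up the
 * delta since the previous read-out.
 */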
2204
Eric Dumazetbb7d92e2012-02-06 22:17:21 +00002205/* ioctl to device function */
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002206static int sh_eth_do_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002207{
2208 struct sh_eth_private *mdp = netdev_priv(ndev);
2209 struct phy_device *phydev = mdp->phydev;
2210
2211 if (!netif_running(ndev))
2212 return -EINVAL;
2213
2214 if (!phydev)
2215 return -ENODEV;
2216
Richard Cochran28b04112010-07-17 08:48:55 +00002217 return phy_mii_ioctl(phydev, rq, cmd);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002218}
2219
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002220/* For TSU_POSTn. Please refer to the manual about these (strange) bitfields */
2221static void *sh_eth_tsu_get_post_reg_offset(struct sh_eth_private *mdp,
2222 int entry)
2223{
2224 return sh_eth_tsu_get_offset(mdp, TSU_POST1) + (entry / 8 * 4);
2225}
2226
2227static u32 sh_eth_tsu_get_post_mask(int entry)
2228{
2229 return 0x0f << (28 - ((entry % 8) * 4));
2230}
2231
2232static u32 sh_eth_tsu_get_post_bit(struct sh_eth_private *mdp, int entry)
2233{
2234 return (0x08 >> (mdp->port << 1)) << (28 - ((entry % 8) * 4));
2235}
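/* Worked example (illustrative, not part of the original driver): each
 * TSU_POSTn register packs 8 CAM entries at 4 bits apiece, entry 0 in
 * the top nibble, with one bit per port inside a nibble. For entry 5:
 *
 *	sh_eth_tsu_get_post_mask(5)     = 0x0f << 8 = 0x00000f00
 *	sh_eth_tsu_get_post_bit(mdp, 5) = 0x00000800 (port 0)
 *	                                  0x00000200 (port 1)
 */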
2236
2237static void sh_eth_tsu_enable_cam_entry_post(struct net_device *ndev,
2238 int entry)
2239{
2240 struct sh_eth_private *mdp = netdev_priv(ndev);
2241 u32 tmp;
2242 void *reg_offset;
2243
2244 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2245 tmp = ioread32(reg_offset);
2246 iowrite32(tmp | sh_eth_tsu_get_post_bit(mdp, entry), reg_offset);
2247}
2248
2249static bool sh_eth_tsu_disable_cam_entry_post(struct net_device *ndev,
2250 int entry)
2251{
2252 struct sh_eth_private *mdp = netdev_priv(ndev);
2253 u32 post_mask, ref_mask, tmp;
2254 void *reg_offset;
2255
2256 reg_offset = sh_eth_tsu_get_post_reg_offset(mdp, entry);
2257 post_mask = sh_eth_tsu_get_post_mask(entry);
2258 ref_mask = sh_eth_tsu_get_post_bit(mdp, entry) & ~post_mask;
2259
2260 tmp = ioread32(reg_offset);
2261 iowrite32(tmp & ~post_mask, reg_offset);
2262
2263	/* If the other port still has this entry enabled, return "true" */
2264 return tmp & ref_mask;
2265}
2266
2267static int sh_eth_tsu_busy(struct net_device *ndev)
2268{
2269 int timeout = SH_ETH_TSU_TIMEOUT_MS * 100;
2270 struct sh_eth_private *mdp = netdev_priv(ndev);
2271
2272 while ((sh_eth_tsu_read(mdp, TSU_ADSBSY) & TSU_ADSBSY_0)) {
2273 udelay(10);
2274 timeout--;
2275 if (timeout <= 0) {
Sergei Shtylyovda246852014-03-15 03:29:14 +03002276 netdev_err(ndev, "%s: timeout\n", __func__);
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002277 return -ETIMEDOUT;
2278 }
2279 }
2280
2281 return 0;
2282}
2283
2284static int sh_eth_tsu_write_entry(struct net_device *ndev, void *reg,
2285 const u8 *addr)
2286{
2287 u32 val;
2288
2289 val = addr[0] << 24 | addr[1] << 16 | addr[2] << 8 | addr[3];
2290 iowrite32(val, reg);
2291 if (sh_eth_tsu_busy(ndev) < 0)
2292 return -EBUSY;
2293
2294 val = addr[4] << 8 | addr[5];
2295 iowrite32(val, reg + 4);
2296 if (sh_eth_tsu_busy(ndev) < 0)
2297 return -EBUSY;
2298
2299 return 0;
2300}
2301
2302static void sh_eth_tsu_read_entry(void *reg, u8 *addr)
2303{
2304 u32 val;
2305
2306 val = ioread32(reg);
2307 addr[0] = (val >> 24) & 0xff;
2308 addr[1] = (val >> 16) & 0xff;
2309 addr[2] = (val >> 8) & 0xff;
2310 addr[3] = val & 0xff;
2311 val = ioread32(reg + 4);
2312 addr[4] = (val >> 8) & 0xff;
2313 addr[5] = val & 0xff;
2314}
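/* Worked example (illustrative, not part of the original driver): a CAM
 * entry spreads the MAC address over two registers - the first u32
 * carries bytes 0-3 and the second carries bytes 4-5 in its low 16
 * bits. 00:11:22:33:44:55 is stored as 0x00112233 / 0x00004455,
 * matching what sh_eth_tsu_write_entry() composed on the way in.
 */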
2315
2316
2317static int sh_eth_tsu_find_entry(struct net_device *ndev, const u8 *addr)
2318{
2319 struct sh_eth_private *mdp = netdev_priv(ndev);
2320 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2321 int i;
2322 u8 c_addr[ETH_ALEN];
2323
2324 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2325 sh_eth_tsu_read_entry(reg_offset, c_addr);
dingtianhongc4bde292013-12-30 15:41:17 +08002326 if (ether_addr_equal(addr, c_addr))
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002327 return i;
2328 }
2329
2330 return -ENOENT;
2331}
2332
2333static int sh_eth_tsu_find_empty(struct net_device *ndev)
2334{
2335 u8 blank[ETH_ALEN];
2336 int entry;
2337
2338 memset(blank, 0, sizeof(blank));
2339 entry = sh_eth_tsu_find_entry(ndev, blank);
2340 return (entry < 0) ? -ENOMEM : entry;
2341}
2342
2343static int sh_eth_tsu_disable_cam_entry_table(struct net_device *ndev,
2344 int entry)
2345{
2346 struct sh_eth_private *mdp = netdev_priv(ndev);
2347 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2348 int ret;
2349 u8 blank[ETH_ALEN];
2350
2351 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) &
2352 ~(1 << (31 - entry)), TSU_TEN);
2353
2354 memset(blank, 0, sizeof(blank));
2355 ret = sh_eth_tsu_write_entry(ndev, reg_offset + entry * 8, blank);
2356 if (ret < 0)
2357 return ret;
2358 return 0;
2359}
2360
2361static int sh_eth_tsu_add_entry(struct net_device *ndev, const u8 *addr)
2362{
2363 struct sh_eth_private *mdp = netdev_priv(ndev);
2364 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2365 int i, ret;
2366
2367 if (!mdp->cd->tsu)
2368 return 0;
2369
2370 i = sh_eth_tsu_find_entry(ndev, addr);
2371 if (i < 0) {
2372 /* No entry found, create one */
2373 i = sh_eth_tsu_find_empty(ndev);
2374 if (i < 0)
2375 return -ENOMEM;
2376 ret = sh_eth_tsu_write_entry(ndev, reg_offset + i * 8, addr);
2377 if (ret < 0)
2378 return ret;
2379
2380 /* Enable the entry */
2381 sh_eth_tsu_write(mdp, sh_eth_tsu_read(mdp, TSU_TEN) |
2382 (1 << (31 - i)), TSU_TEN);
2383 }
2384
2385 /* Entry found or created, enable POST */
2386 sh_eth_tsu_enable_cam_entry_post(ndev, i);
2387
2388 return 0;
2389}
2390
2391static int sh_eth_tsu_del_entry(struct net_device *ndev, const u8 *addr)
2392{
2393 struct sh_eth_private *mdp = netdev_priv(ndev);
2394 int i, ret;
2395
2396 if (!mdp->cd->tsu)
2397 return 0;
2398
2399 i = sh_eth_tsu_find_entry(ndev, addr);
2400	if (i >= 0) {
2401 /* Entry found */
2402 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2403 goto done;
2404
2405		/* Disable the entry if both ports were disabled */
2406 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2407 if (ret < 0)
2408 return ret;
2409 }
2410done:
2411 return 0;
2412}
2413
2414static int sh_eth_tsu_purge_all(struct net_device *ndev)
2415{
2416 struct sh_eth_private *mdp = netdev_priv(ndev);
2417 int i, ret;
2418
2419 if (unlikely(!mdp->cd->tsu))
2420 return 0;
2421
2422 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
2423 if (sh_eth_tsu_disable_cam_entry_post(ndev, i))
2424 continue;
2425
2426		/* Disable the entry if both ports were disabled */
2427 ret = sh_eth_tsu_disable_cam_entry_table(ndev, i);
2428 if (ret < 0)
2429 return ret;
2430 }
2431
2432 return 0;
2433}
2434
2435static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
2436{
2437 struct sh_eth_private *mdp = netdev_priv(ndev);
2438 u8 addr[ETH_ALEN];
2439 void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
2440 int i;
2441
2442 if (unlikely(!mdp->cd->tsu))
2443 return;
2444
2445 for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
2446 sh_eth_tsu_read_entry(reg_offset, addr);
2447 if (is_multicast_ether_addr(addr))
2448 sh_eth_tsu_del_entry(ndev, addr);
2449 }
2450}
2451
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002452/* Set the multicast reception mode */
2453static void sh_eth_set_multicast_list(struct net_device *ndev)
2454{
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002455 struct sh_eth_private *mdp = netdev_priv(ndev);
2456 u32 ecmr_bits;
2457 int mcast_all = 0;
2458 unsigned long flags;
2459
2460 spin_lock_irqsave(&mdp->lock, flags);
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002461 /* Initial condition is MCT = 1, PRM = 0.
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002462 * Depending on ndev->flags, set PRM or clear MCT
2463 */
2464 ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
2465
2466 if (!(ndev->flags & IFF_MULTICAST)) {
2467 sh_eth_tsu_purge_mcast(ndev);
2468 mcast_all = 1;
2469 }
2470 if (ndev->flags & IFF_ALLMULTI) {
2471 sh_eth_tsu_purge_mcast(ndev);
2472 ecmr_bits &= ~ECMR_MCT;
2473 mcast_all = 1;
2474 }
2475
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002476 if (ndev->flags & IFF_PROMISC) {
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002477 sh_eth_tsu_purge_all(ndev);
2478 ecmr_bits = (ecmr_bits & ~ECMR_MCT) | ECMR_PRM;
2479 } else if (mdp->cd->tsu) {
2480 struct netdev_hw_addr *ha;
2481 netdev_for_each_mc_addr(ha, ndev) {
2482 if (mcast_all && is_multicast_ether_addr(ha->addr))
2483 continue;
2484
2485 if (sh_eth_tsu_add_entry(ndev, ha->addr) < 0) {
2486 if (!mcast_all) {
2487 sh_eth_tsu_purge_mcast(ndev);
2488 ecmr_bits &= ~ECMR_MCT;
2489 mcast_all = 1;
2490 }
2491 }
2492 }
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002493 } else {
2494 /* Normal, unicast/broadcast-only mode. */
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002495 ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002496 }
Yoshihiro Shimoda6743fe62012-02-15 17:55:03 +00002497
2498 /* update the ethernet mode */
2499 sh_eth_write(ndev, ecmr_bits, ECMR);
2500
2501 spin_unlock_irqrestore(&mdp->lock, flags);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002502}
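/* Illustrative note (an assumption about the ECMR semantics, inferred
 * from the code above rather than stated there): the reception mode is
 * roughly encoded as
 *
 *	MCT=1, PRM=0  multicast filtered through the TSU CAM entries
 *	MCT=0, PRM=0  all multicast accepted (IFF_ALLMULTI or CAM overflow)
 *	PRM=1         promiscuous (IFF_PROMISC)
 *
 * with the CAM add/purge calls keeping the TSU tables consistent with
 * whichever mode was chosen.
 */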
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002503
2504static int sh_eth_get_vtag_index(struct sh_eth_private *mdp)
2505{
2506 if (!mdp->port)
2507 return TSU_VTAG0;
2508 else
2509 return TSU_VTAG1;
2510}
2511
Patrick McHardy80d5c362013-04-19 02:04:28 +00002512static int sh_eth_vlan_rx_add_vid(struct net_device *ndev,
2513 __be16 proto, u16 vid)
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002514{
2515 struct sh_eth_private *mdp = netdev_priv(ndev);
2516 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2517
2518 if (unlikely(!mdp->cd->tsu))
2519 return -EPERM;
2520
2521 /* No filtering if vid = 0 */
2522 if (!vid)
2523 return 0;
2524
2525 mdp->vlan_num_ids++;
2526
Sergei Shtylyov128296f2014-01-03 15:52:22 +03002527 /* The controller has one VLAN tag HW filter. So, if the filter is
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002528	 * already enabled, the driver disables it and accepts frames
	 * with any VLAN tag instead.
2529	 */
2530 if (mdp->vlan_num_ids > 1) {
2531 /* disable VLAN filter */
2532 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2533 return 0;
2534 }
2535
2536 sh_eth_tsu_write(mdp, TSU_VTAG_ENABLE | (vid & TSU_VTAG_VID_MASK),
2537 vtag_reg_index);
2538
2539 return 0;
2540}
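/* Worked example (illustrative, not part of the original driver): with
 * a single VLAN tag filter per port the behaviour degrades gracefully -
 * adding the first VID programs TSU_VTAG_ENABLE | vid, adding a second
 * clears the register so every tag is accepted, and deleting a VID also
 * just clears it, so hardware filtering is not re-established until the
 * count drops to zero and a VID is added again.
 */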
2541
Patrick McHardy80d5c362013-04-19 02:04:28 +00002542static int sh_eth_vlan_rx_kill_vid(struct net_device *ndev,
2543 __be16 proto, u16 vid)
Yoshihiro Shimoda71cc7c32012-02-15 17:55:06 +00002544{
2545 struct sh_eth_private *mdp = netdev_priv(ndev);
2546 int vtag_reg_index = sh_eth_get_vtag_index(mdp);
2547
2548 if (unlikely(!mdp->cd->tsu))
2549 return -EPERM;
2550
2551 /* No filtering if vid = 0 */
2552 if (!vid)
2553 return 0;
2554
2555 mdp->vlan_num_ids--;
2556 sh_eth_tsu_write(mdp, 0, vtag_reg_index);
2557
2558 return 0;
2559}
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002560
2561/* SuperH's TSU register init function */
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002562static void sh_eth_tsu_init(struct sh_eth_private *mdp)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002563{
Simon Hormandb893472014-01-17 09:22:28 +09002564 if (sh_eth_is_rz_fast_ether(mdp)) {
2565		sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entries */
2566 return;
2567 }
2568
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002569 sh_eth_tsu_write(mdp, 0, TSU_FWEN0); /* Disable forward(0->1) */
2570 sh_eth_tsu_write(mdp, 0, TSU_FWEN1); /* Disable forward(1->0) */
2571 sh_eth_tsu_write(mdp, 0, TSU_FCM); /* forward fifo 3k-3k */
2572 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL0);
2573 sh_eth_tsu_write(mdp, 0xc, TSU_BSYSL1);
2574 sh_eth_tsu_write(mdp, 0, TSU_PRISL0);
2575 sh_eth_tsu_write(mdp, 0, TSU_PRISL1);
2576 sh_eth_tsu_write(mdp, 0, TSU_FWSL0);
2577 sh_eth_tsu_write(mdp, 0, TSU_FWSL1);
2578 sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, TSU_FWSLC);
Yoshihiro Shimodac5ed5362011-03-07 21:59:38 +00002579 if (sh_eth_is_gether(mdp)) {
2580 sh_eth_tsu_write(mdp, 0, TSU_QTAG0); /* Disable QTAG(0->1) */
2581 sh_eth_tsu_write(mdp, 0, TSU_QTAG1); /* Disable QTAG(1->0) */
2582 } else {
2583 sh_eth_tsu_write(mdp, 0, TSU_QTAGM0); /* Disable QTAG(0->1) */
2584 sh_eth_tsu_write(mdp, 0, TSU_QTAGM1); /* Disable QTAG(1->0) */
2585 }
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002586	sh_eth_tsu_write(mdp, 0, TSU_FWSR);	/* clear all interrupt status */
2587	sh_eth_tsu_write(mdp, 0, TSU_FWINMK);	/* Disable all interrupts */
2588	sh_eth_tsu_write(mdp, 0, TSU_TEN);	/* Disable all CAM entries */
2589 sh_eth_tsu_write(mdp, 0, TSU_POST1); /* Disable CAM entry [ 0- 7] */
2590 sh_eth_tsu_write(mdp, 0, TSU_POST2); /* Disable CAM entry [ 8-15] */
2591 sh_eth_tsu_write(mdp, 0, TSU_POST3); /* Disable CAM entry [16-23] */
2592 sh_eth_tsu_write(mdp, 0, TSU_POST4); /* Disable CAM entry [24-31] */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002593}
2594
2595/* MDIO bus release function */
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002596static int sh_mdio_release(struct sh_eth_private *mdp)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002597{
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002598 /* unregister mdio bus */
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002599 mdiobus_unregister(mdp->mii_bus);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002600
2601 /* free bitbang info */
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002602 free_mdio_bitbang(mdp->mii_bus);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002603
2604 return 0;
2605}
2606
2607/* MDIO bus init function */
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002608static int sh_mdio_init(struct sh_eth_private *mdp,
Yoshihiro Shimodab3017e62011-03-07 21:59:55 +00002609 struct sh_eth_plat_data *pd)
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002610{
2611 int ret, i;
2612 struct bb_info *bitbang;
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002613 struct platform_device *pdev = mdp->pdev;
Laurent Pinchartaa8d4222014-03-20 15:00:31 +01002614 struct device *dev = &mdp->pdev->dev;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002615
2616 /* create bit control struct for PHY */
Laurent Pinchartaa8d4222014-03-20 15:00:31 +01002617 bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL);
Laurent Pinchartf738a132014-03-20 15:00:35 +01002618 if (!bitbang)
2619 return -ENOMEM;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002620
2621 /* bitbang init */
Yoshihiro Shimodaae706442011-09-27 21:48:58 +00002622 bitbang->addr = mdp->addr + mdp->reg_offset[PIR];
Yoshihiro Shimodab3017e62011-03-07 21:59:55 +00002623 bitbang->set_gate = pd->set_mdio_gate;
Sergei Shtylyovdfed5e72013-03-21 10:37:54 +00002624 bitbang->mdi_msk = PIR_MDI;
2625 bitbang->mdo_msk = PIR_MDO;
2626 bitbang->mmd_msk = PIR_MMD;
2627 bitbang->mdc_msk = PIR_MDC;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002628 bitbang->ctrl.ops = &bb_ops;
2629
Stefan Weilc2e07b32010-08-03 19:44:52 +02002630 /* MII controller setting */
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002631 mdp->mii_bus = alloc_mdio_bitbang(&bitbang->ctrl);
Laurent Pinchartf738a132014-03-20 15:00:35 +01002632 if (!mdp->mii_bus)
2633 return -ENOMEM;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002634
2635 /* Hook up MII support for ethtool */
2636 mdp->mii_bus->name = "sh_mii";
Laurent Pincharta5bd60602014-03-20 15:00:32 +01002637 mdp->mii_bus->parent = dev;
Florian Fainelli5278fb52012-01-09 23:59:17 +00002638 snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002639 pdev->name, pdev->id);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002640
2641 /* PHY IRQ */
Sergei Shtylyov86b5d252014-05-13 02:30:14 +04002642 mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
2643 GFP_KERNEL);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002644 if (!mdp->mii_bus->irq) {
2645 ret = -ENOMEM;
2646 goto out_free_bus;
2647 }
2648
Laurent Pinchartbd920ff2014-03-20 15:00:33 +01002649 /* register MDIO bus */
2650 if (dev->of_node) {
2651 ret = of_mdiobus_register(mdp->mii_bus, dev->of_node);
Ben Dooks702eca02014-03-12 17:47:40 +00002652 } else {
2653 for (i = 0; i < PHY_MAX_ADDR; i++)
2654 mdp->mii_bus->irq[i] = PHY_POLL;
2655 if (pd->phy_irq > 0)
2656 mdp->mii_bus->irq[pd->phy] = pd->phy_irq;
2657
2658 ret = mdiobus_register(mdp->mii_bus);
2659 }
2660
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002661 if (ret)
Sergei Shtylyovd5e07e62013-03-21 10:41:11 +00002662 goto out_free_bus;
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002663
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002664 return 0;
2665
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002666out_free_bus:
Lennert Buytenhek298cf9b2008-10-08 16:29:57 -07002667 free_mdio_bitbang(mdp->mii_bus);
Nobuhiro Iwamatsu86a74ff2008-06-09 16:33:56 -07002668 return ret;
2669}
2670
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002671static const u16 *sh_eth_get_register_offset(int register_type)
2672{
2673 const u16 *reg_offset = NULL;
2674
2675 switch (register_type) {
2676 case SH_ETH_REG_GIGABIT:
2677 reg_offset = sh_eth_offset_gigabit;
2678 break;
Simon Hormandb893472014-01-17 09:22:28 +09002679 case SH_ETH_REG_FAST_RZ:
2680 reg_offset = sh_eth_offset_fast_rz;
2681 break;
Sergei Shtylyova3f109b2013-03-28 11:51:31 +00002682 case SH_ETH_REG_FAST_RCAR:
2683 reg_offset = sh_eth_offset_fast_rcar;
2684 break;
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002685 case SH_ETH_REG_FAST_SH4:
2686 reg_offset = sh_eth_offset_fast_sh4;
2687 break;
2688 case SH_ETH_REG_FAST_SH3_SH2:
2689 reg_offset = sh_eth_offset_fast_sh3_sh2;
2690 break;
2691 default:
Yoshihiro Shimoda4a555302011-03-07 21:59:26 +00002692 break;
2693 }
2694
2695 return reg_offset;
2696}
2697
Sergei Shtylyov8f728d72013-06-13 00:55:34 +04002698static const struct net_device_ops sh_eth_netdev_ops = {
Alexander Beregalovebf84ea2009-04-11 07:40:49 +00002699 .ndo_open = sh_eth_open,
2700 .ndo_stop = sh_eth_close,
2701 .ndo_start_xmit = sh_eth_start_xmit,
2702 .ndo_get_stats = sh_eth_get_stats,
Alexander Beregalovebf84ea2009-04-11 07:40:49 +00002703 .ndo_tx_timeout = sh_eth_tx_timeout,
2704 .ndo_do_ioctl = sh_eth_do_ioctl,
2705 .ndo_validate_addr = eth_validate_addr,
2706 .ndo_set_mac_address = eth_mac_addr,
2707 .ndo_change_mtu = eth_change_mtu,
2708};
2709
Sergei Shtylyov8f728d72013-06-13 00:55:34 +04002710static const struct net_device_ops sh_eth_netdev_ops_tsu = {
2711 .ndo_open = sh_eth_open,
2712 .ndo_stop = sh_eth_close,
2713 .ndo_start_xmit = sh_eth_start_xmit,
2714 .ndo_get_stats = sh_eth_get_stats,
2715 .ndo_set_rx_mode = sh_eth_set_multicast_list,
2716 .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
2717 .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
2718 .ndo_tx_timeout = sh_eth_tx_timeout,
2719 .ndo_do_ioctl = sh_eth_do_ioctl,
2720 .ndo_validate_addr = eth_validate_addr,
2721 .ndo_set_mac_address = eth_mac_addr,
2722 .ndo_change_mtu = eth_change_mtu,
2723};
2724
#ifdef CONFIG_OF
static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct sh_eth_plat_data *pdata;
	const char *mac_addr;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return NULL;

	pdata->phy_interface = of_get_phy_mode(np);

	mac_addr = of_get_mac_address(np);
	if (mac_addr)
		memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);

	pdata->no_ether_link =
		of_property_read_bool(np, "renesas,no-ether-link");
	pdata->ether_link_active_low =
		of_property_read_bool(np, "renesas,ether-link-active-low");

	return pdata;
}

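/* An illustrative device tree node that sh_eth_parse_dt() and the
 * match table below would accept. The unit address, reg/interrupt
 * values and the PHY label are invented for this example, not taken
 * from any real board:
 *
 *	ethernet@ee700000 {
 *		compatible = "renesas,ether-r8a7791";
 *		reg = <0 0xee700000 0 0x400>;
 *		interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
 *		phy-mode = "rmii";
 *		phy-handle = <&phy1>;
 *		renesas,ether-link-active-low;
 *	};
 */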
static const struct of_device_id sh_eth_match_table[] = {
	{ .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
	{ .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
	{ .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
	{ .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_eth_match_table);
#else
static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
{
	return NULL;
}
#endif

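/* Probe: map the MAC registers, obtain platform data (from the board
 * file, or synthesized from DT by sh_eth_parse_dt()), select the
 * per-SoC cpu_data and register layout, set the MAC address, map and
 * initialize the TSU when present, bring up the MDIO bus, and finally
 * register the net_device.
 */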
static int sh_eth_drv_probe(struct platform_device *pdev)
{
	int ret, devno = 0;
	struct resource *res;
	struct net_device *ndev = NULL;
	struct sh_eth_private *mdp = NULL;
	struct sh_eth_plat_data *pd = dev_get_platdata(&pdev->dev);
	const struct platform_device_id *id = platform_get_device_id(pdev);

	/* get base addr */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	ndev = alloc_etherdev(sizeof(struct sh_eth_private));
	if (!ndev)
		return -ENOMEM;

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	/* The sh Ether-specific entries in the device structure. */
	ndev->base_addr = res->start;
	devno = pdev->id;
	if (devno < 0)
		devno = 0;

	ndev->dma = -1;
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		ret = -ENODEV;
		goto out_release;
	}
	ndev->irq = ret;

	SET_NETDEV_DEV(ndev, &pdev->dev);

	mdp = netdev_priv(ndev);
	mdp->num_tx_ring = TX_RING_SIZE;
	mdp->num_rx_ring = RX_RING_SIZE;
	mdp->addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mdp->addr)) {
		ret = PTR_ERR(mdp->addr);
		goto out_release;
	}

	spin_lock_init(&mdp->lock);
	mdp->pdev = pdev;

	/* in the DT case, synthesize platform data from the device tree */
	if (pdev->dev.of_node)
		pd = sh_eth_parse_dt(&pdev->dev);
	if (!pd) {
		dev_err(&pdev->dev, "no platform data\n");
		ret = -EINVAL;
		goto out_release;
	}

	/* get PHY ID */
	mdp->phy_id = pd->phy;
	mdp->phy_interface = pd->phy_interface;
	/* EDMAC endian */
	mdp->edmac_endian = pd->edmac_endian;
	mdp->no_ether_link = pd->no_ether_link;
	mdp->ether_link_active_low = pd->ether_link_active_low;

	/* set cpu data */
	if (id) {
		mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
	} else {
		const struct of_device_id *match;

		match = of_match_device(of_match_ptr(sh_eth_match_table),
					&pdev->dev);
		mdp->cd = (struct sh_eth_cpu_data *)match->data;
	}
	mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
	if (!mdp->reg_offset) {
		dev_err(&pdev->dev, "Unknown register type (%d)\n",
			mdp->cd->register_type);
		ret = -EINVAL;
		goto out_release;
	}
	sh_eth_set_default_cpu_data(mdp->cd);

	/* set function */
	if (mdp->cd->tsu)
		ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
	else
		ndev->netdev_ops = &sh_eth_netdev_ops;
	ndev->ethtool_ops = &sh_eth_ethtool_ops;
	ndev->watchdog_timeo = TX_TIMEOUT;

	/* debug message level */
	mdp->msg_enable = SH_ETH_DEF_MSG_ENABLE;

	/* read and set MAC address */
	read_mac_address(ndev, pd->mac_addr);
	if (!is_valid_ether_addr(ndev->dev_addr)) {
		dev_warn(&pdev->dev,
			 "no valid MAC address supplied, using a random one.\n");
		eth_hw_addr_random(ndev);
	}

	/* ioremap the TSU registers */
	if (mdp->cd->tsu) {
		struct resource *rtsu;

		rtsu = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		mdp->tsu_addr = devm_ioremap_resource(&pdev->dev, rtsu);
		if (IS_ERR(mdp->tsu_addr)) {
			ret = PTR_ERR(mdp->tsu_addr);
			goto out_release;
		}
		mdp->port = devno % 2;
		ndev->features = NETIF_F_HW_VLAN_CTAG_FILTER;
	}

	/* initialize the first device, or any device whose platform
	 * data requests it via needs_init
	 */
	if (!devno || pd->needs_init) {
		if (mdp->cd->chip_reset)
			mdp->cd->chip_reset(ndev);

		if (mdp->cd->tsu) {
			/* TSU init (Init only) */
			sh_eth_tsu_init(mdp);
		}
	}

	/* MDIO bus init */
	ret = sh_mdio_init(mdp, pd);
	if (ret) {
		dev_err(&ndev->dev, "failed to initialise MDIO\n");
		goto out_release;
	}

	netif_napi_add(ndev, &mdp->napi, sh_eth_poll, 64);

	/* network device register */
	ret = register_netdev(ndev);
	if (ret)
		goto out_napi_del;

	/* print device information */
	netdev_info(ndev, "Base address at 0x%x, %pM, IRQ %d.\n",
		    (u32)ndev->base_addr, ndev->dev_addr, ndev->irq);

	pm_runtime_put(&pdev->dev);
	platform_set_drvdata(pdev, ndev);

	return ret;

out_napi_del:
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);

out_release:
	/* net_dev free */
	if (ndev)
		free_netdev(ndev);

	pm_runtime_put(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return ret;
}

static int sh_eth_drv_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct sh_eth_private *mdp = netdev_priv(ndev);

	unregister_netdev(ndev);
	netif_napi_del(&mdp->napi);
	sh_mdio_release(mdp);
	pm_runtime_disable(&pdev->dev);
	free_netdev(ndev);

	return 0;
}

#ifdef CONFIG_PM
static int sh_eth_runtime_nop(struct device *dev)
{
	/* Runtime PM callback shared between ->runtime_suspend()
	 * and ->runtime_resume(). Simply returns success.
	 *
	 * This driver re-initializes all registers after
	 * pm_runtime_get_sync() anyway so there is no need
	 * to save and restore registers here.
	 */
	return 0;
}

static const struct dev_pm_ops sh_eth_dev_pm_ops = {
	.runtime_suspend = sh_eth_runtime_nop,
	.runtime_resume = sh_eth_runtime_nop,
};
#define SH_ETH_PM_OPS (&sh_eth_dev_pm_ops)
#else
#define SH_ETH_PM_OPS NULL
#endif

static struct platform_device_id sh_eth_id_table[] = {
	{ "sh7619-ether", (kernel_ulong_t)&sh7619_data },
	{ "sh771x-ether", (kernel_ulong_t)&sh771x_data },
	{ "sh7724-ether", (kernel_ulong_t)&sh7724_data },
	{ "sh7734-gether", (kernel_ulong_t)&sh7734_data },
	{ "sh7757-ether", (kernel_ulong_t)&sh7757_data },
	{ "sh7757-gether", (kernel_ulong_t)&sh7757_data_giga },
	{ "sh7763-gether", (kernel_ulong_t)&sh7763_data },
	{ "r7s72100-ether", (kernel_ulong_t)&r7s72100_data },
	{ "r8a7740-gether", (kernel_ulong_t)&r8a7740_data },
	{ "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
	{ "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
	{ "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_eth_id_table);

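/* A sketch (not part of this driver) of how non-DT board code could
 * bind a device to the "r8a777x-ether" entry above; the register base,
 * IRQ number and PHY address are invented for illustration:
 *
 *	static struct sh_eth_plat_data ether_pdata = {
 *		.phy		= 0x01,
 *		.phy_interface	= PHY_INTERFACE_MODE_RMII,
 *		.edmac_endian	= EDMAC_LITTLE_ENDIAN,
 *	};
 *
 *	static struct resource ether_resources[] = {
 *		DEFINE_RES_MEM(0xfde00000, 0x400),
 *		DEFINE_RES_IRQ(105),
 *	};
 *
 *	platform_device_register_resndata(NULL, "r8a777x-ether", -1,
 *					  ether_resources,
 *					  ARRAY_SIZE(ether_resources),
 *					  &ether_pdata,
 *					  sizeof(ether_pdata));
 */
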
static struct platform_driver sh_eth_driver = {
	.probe = sh_eth_drv_probe,
	.remove = sh_eth_drv_remove,
	.id_table = sh_eth_id_table,
	.driver = {
		   .name = CARDNAME,
		   .pm = SH_ETH_PM_OPS,
		   .of_match_table = of_match_ptr(sh_eth_match_table),
	},
};

module_platform_driver(sh_eth_driver);

MODULE_AUTHOR("Nobuhiro Iwamatsu, Yoshihiro Shimoda");
MODULE_DESCRIPTION("Renesas SuperH Ethernet driver");
MODULE_LICENSE("GPL v2");