1/*
2 * Keystone GBE and XGBE subsystem code
3 *
4 * Copyright (C) 2014 Texas Instruments Incorporated
5 * Authors: Sandeep Nair <sandeep_n@ti.com>
6 * Sandeep Paulraj <s-paulraj@ti.com>
7 * Cyril Chemparathy <cyril@ti.com>
8 * Santosh Shilimkar <santosh.shilimkar@ti.com>
9 * Wingman Kwok <w-kwok2@ti.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation version 2.
14 *
15 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
16 * kind, whether express or implied; without even the implied warranty
17 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 */
20
21#include <linux/io.h>
22#include <linux/module.h>
23#include <linux/of_mdio.h>
24#include <linux/of_address.h>
25#include <linux/if_vlan.h>
26#include <linux/ethtool.h>
27
28#include "cpsw_ale.h"
29#include "netcp.h"
30
31#define NETCP_DRIVER_NAME "TI KeyStone Ethernet Driver"
32#define NETCP_DRIVER_VERSION "v1.0"
33
34#define GBE_IDENT(reg) ((reg >> 16) & 0xffff)
35#define GBE_MAJOR_VERSION(reg) (reg >> 8 & 0x7)
36#define GBE_MINOR_VERSION(reg) (reg & 0xff)
37#define GBE_RTL_VERSION(reg) ((reg >> 11) & 0x1f)
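/* The macros above decode fields of the subsystem id_ver register: bits
 * 31-16 carry the subsystem identifier, while the lower bits hold the RTL,
 * major and minor revision fields used for the version checks below.
 */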
38
39/* 1G Ethernet SS defines */
40#define GBE_MODULE_NAME "netcp-gbe"
41#define GBE_SS_VERSION_14 0x4ed21104
42
43#define GBE_SS_REG_INDEX 0
44#define GBE_SGMII34_REG_INDEX 1
45#define GBE_SM_REG_INDEX 2
46/* offset relative to base of GBE_SS_REG_INDEX */
47#define GBE13_SGMII_MODULE_OFFSET 0x100
48/* offset relative to base of GBE_SM_REG_INDEX */
49#define GBE13_HOST_PORT_OFFSET 0x34
50#define GBE13_SLAVE_PORT_OFFSET 0x60
51#define GBE13_EMAC_OFFSET 0x100
52#define GBE13_SLAVE_PORT2_OFFSET 0x200
53#define GBE13_HW_STATS_OFFSET 0x300
54#define GBE13_ALE_OFFSET 0x600
55#define GBE13_HOST_PORT_NUM 0
56#define GBE13_NUM_ALE_ENTRIES 1024
57
58/* 1G Ethernet NU SS defines */
59#define GBENU_MODULE_NAME "netcp-gbenu"
60#define GBE_SS_ID_NU 0x4ee6
61#define GBE_SS_ID_2U 0x4ee8
62
63#define IS_SS_ID_MU(d) \
64 ((GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU) || \
65 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_2U))
66
67#define IS_SS_ID_NU(d) \
68 (GBE_IDENT((d)->ss_version) == GBE_SS_ID_NU)
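/* The NU and 2U flavours of the subsystem are distinguished by the IDENT
 * field of the cached ss_version; IS_SS_ID_MU() matches either of them.
 */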
69
70#define GBENU_SS_REG_INDEX 0
71#define GBENU_SM_REG_INDEX 1
72#define GBENU_SGMII_MODULE_OFFSET 0x100
73#define GBENU_HOST_PORT_OFFSET 0x1000
74#define GBENU_SLAVE_PORT_OFFSET 0x2000
75#define GBENU_EMAC_OFFSET 0x2330
76#define GBENU_HW_STATS_OFFSET 0x1a000
77#define GBENU_ALE_OFFSET 0x1e000
78#define GBENU_HOST_PORT_NUM 0
79#define GBENU_NUM_ALE_ENTRIES 1024
80
81/* 10G Ethernet SS defines */
82#define XGBE_MODULE_NAME "netcp-xgbe"
83#define XGBE_SS_VERSION_10 0x4ee42100
84
85#define XGBE_SS_REG_INDEX 0
86#define XGBE_SM_REG_INDEX 1
87#define XGBE_SERDES_REG_INDEX 2
88
89/* offset relative to base of XGBE_SS_REG_INDEX */
90#define XGBE10_SGMII_MODULE_OFFSET 0x100
91/* offset relative to base of XGBE_SM_REG_INDEX */
92#define XGBE10_HOST_PORT_OFFSET 0x34
93#define XGBE10_SLAVE_PORT_OFFSET 0x64
94#define XGBE10_EMAC_OFFSET 0x400
95#define XGBE10_ALE_OFFSET 0x700
96#define XGBE10_HW_STATS_OFFSET 0x800
97#define XGBE10_HOST_PORT_NUM 0
98#define XGBE10_NUM_ALE_ENTRIES 1024
99
100#define GBE_TIMER_INTERVAL (HZ / 2)
101
102/* Soft reset register values */
103#define SOFT_RESET_MASK BIT(0)
104#define SOFT_RESET BIT(0)
105#define DEVICE_EMACSL_RESET_POLL_COUNT 100
106#define GMACSL_RET_WARN_RESET_INCOMPLETE -2
107
108#define MACSL_RX_ENABLE_CSF BIT(23)
109#define MACSL_ENABLE_EXT_CTL BIT(18)
110#define MACSL_XGMII_ENABLE BIT(13)
111#define MACSL_XGIG_MODE BIT(8)
112#define MACSL_GIG_MODE BIT(7)
113#define MACSL_GMII_ENABLE BIT(5)
114#define MACSL_FULLDUPLEX BIT(0)
115
116#define GBE_CTL_P0_ENABLE BIT(2)
117#define GBE13_REG_VAL_STAT_ENABLE_ALL 0xff
118#define XGBE_REG_VAL_STAT_ENABLE_ALL 0xf
119#define GBE_STATS_CD_SEL BIT(28)
120
121#define GBE_PORT_MASK(x) (BIT(x) - 1)
122#define GBE_MASK_NO_PORTS 0
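/* GBE_PORT_MASK(x) builds a mask with one bit per port, i.e. bits 0..x-1. */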
123
124#define GBE_DEF_1G_MAC_CONTROL \
125 (MACSL_GIG_MODE | MACSL_GMII_ENABLE | \
126 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
127
128#define GBE_DEF_10G_MAC_CONTROL \
129 (MACSL_XGIG_MODE | MACSL_XGMII_ENABLE | \
130 MACSL_ENABLE_EXT_CTL | MACSL_RX_ENABLE_CSF)
131
132#define GBE_STATSA_MODULE 0
133#define GBE_STATSB_MODULE 1
134#define GBE_STATSC_MODULE 2
135#define GBE_STATSD_MODULE 3
136
137#define GBENU_STATS0_MODULE 0
138#define GBENU_STATS1_MODULE 1
139#define GBENU_STATS2_MODULE 2
140#define GBENU_STATS3_MODULE 3
141#define GBENU_STATS4_MODULE 4
142#define GBENU_STATS5_MODULE 5
143#define GBENU_STATS6_MODULE 6
144#define GBENU_STATS7_MODULE 7
145#define GBENU_STATS8_MODULE 8
146
147#define XGBE_STATS0_MODULE 0
148#define XGBE_STATS1_MODULE 1
149#define XGBE_STATS2_MODULE 2
150
151/* s: 0-based slave_port */
152#define SGMII_BASE(s) \
153 (((s) < 2) ? gbe_dev->sgmii_port_regs : gbe_dev->sgmii_port34_regs)
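/* Slave ports 0-1 live in the first SGMII register block and ports 2-3 in
 * the separate port 3/4 block, hence the selection above.
 */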
154
155#define GBE_TX_QUEUE 648
156#define GBE_TXHOOK_ORDER 0
157#define GBE_DEFAULT_ALE_AGEOUT 30
158#define SLAVE_LINK_IS_XGMII(s) ((s)->link_interface >= XGMII_LINK_MAC_PHY)
159#define NETCP_LINK_STATE_INVALID -1
160
161#define GBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
162 offsetof(struct gbe##_##rb, rn)
163#define GBENU_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
164 offsetof(struct gbenu##_##rb, rn)
165#define XGBE_SET_REG_OFS(p, rb, rn) p->rb##_ofs.rn = \
166 offsetof(struct xgbe##_##rb, rn)
167#define GBE_REG_ADDR(p, rb, rn) (p->rb + p->rb##_ofs.rn)
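/* Register layouts differ between GBE, GBENU and XGBE, so the probe code
 * records per-block register offsets with the *_SET_REG_OFS() macros and
 * GBE_REG_ADDR() resolves base + offset at run time, keeping the common
 * code layout-agnostic.
 */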
168
169#define HOST_TX_PRI_MAP_DEFAULT 0x00000000
170
171struct xgbe_ss_regs {
172 u32 id_ver;
173 u32 synce_count;
174 u32 synce_mux;
175 u32 control;
176};
177
178struct xgbe_switch_regs {
179 u32 id_ver;
180 u32 control;
181 u32 emcontrol;
182 u32 stat_port_en;
183 u32 ptype;
184 u32 soft_idle;
185 u32 thru_rate;
186 u32 gap_thresh;
187 u32 tx_start_wds;
188 u32 flow_control;
189 u32 cppi_thresh;
190};
191
192struct xgbe_port_regs {
193 u32 blk_cnt;
194 u32 port_vlan;
195 u32 tx_pri_map;
196 u32 sa_lo;
197 u32 sa_hi;
198 u32 ts_ctl;
199 u32 ts_seq_ltype;
200 u32 ts_vlan;
201 u32 ts_ctl_ltype2;
202 u32 ts_ctl2;
203 u32 control;
204};
205
206struct xgbe_host_port_regs {
207 u32 blk_cnt;
208 u32 port_vlan;
209 u32 tx_pri_map;
210 u32 src_id;
211 u32 rx_pri_map;
212 u32 rx_maxlen;
213};
214
215struct xgbe_emac_regs {
216 u32 id_ver;
217 u32 mac_control;
218 u32 mac_status;
219 u32 soft_reset;
220 u32 rx_maxlen;
221 u32 __reserved_0;
222 u32 rx_pause;
223 u32 tx_pause;
224 u32 em_control;
225 u32 __reserved_1;
226 u32 tx_gap;
227 u32 rsvd[4];
228};
229
230struct xgbe_host_hw_stats {
231 u32 rx_good_frames;
232 u32 rx_broadcast_frames;
233 u32 rx_multicast_frames;
234 u32 __rsvd_0[3];
235 u32 rx_oversized_frames;
236 u32 __rsvd_1;
237 u32 rx_undersized_frames;
238 u32 __rsvd_2;
239 u32 overrun_type4;
240 u32 overrun_type5;
241 u32 rx_bytes;
242 u32 tx_good_frames;
243 u32 tx_broadcast_frames;
244 u32 tx_multicast_frames;
245 u32 __rsvd_3[9];
246 u32 tx_bytes;
247 u32 tx_64byte_frames;
248 u32 tx_65_to_127byte_frames;
249 u32 tx_128_to_255byte_frames;
250 u32 tx_256_to_511byte_frames;
251 u32 tx_512_to_1023byte_frames;
252 u32 tx_1024byte_frames;
253 u32 net_bytes;
254 u32 rx_sof_overruns;
255 u32 rx_mof_overruns;
256 u32 rx_dma_overruns;
257};
258
259struct xgbe_hw_stats {
260 u32 rx_good_frames;
261 u32 rx_broadcast_frames;
262 u32 rx_multicast_frames;
263 u32 rx_pause_frames;
264 u32 rx_crc_errors;
265 u32 rx_align_code_errors;
266 u32 rx_oversized_frames;
267 u32 rx_jabber_frames;
268 u32 rx_undersized_frames;
269 u32 rx_fragments;
270 u32 overrun_type4;
271 u32 overrun_type5;
272 u32 rx_bytes;
273 u32 tx_good_frames;
274 u32 tx_broadcast_frames;
275 u32 tx_multicast_frames;
276 u32 tx_pause_frames;
277 u32 tx_deferred_frames;
278 u32 tx_collision_frames;
279 u32 tx_single_coll_frames;
280 u32 tx_mult_coll_frames;
281 u32 tx_excessive_collisions;
282 u32 tx_late_collisions;
283 u32 tx_underrun;
284 u32 tx_carrier_sense_errors;
285 u32 tx_bytes;
286 u32 tx_64byte_frames;
287 u32 tx_65_to_127byte_frames;
288 u32 tx_128_to_255byte_frames;
289 u32 tx_256_to_511byte_frames;
290 u32 tx_512_to_1023byte_frames;
291 u32 tx_1024byte_frames;
292 u32 net_bytes;
293 u32 rx_sof_overruns;
294 u32 rx_mof_overruns;
295 u32 rx_dma_overruns;
296};
297
298#define XGBE10_NUM_STAT_ENTRIES (sizeof(struct xgbe_hw_stats)/sizeof(u32))
299
300struct gbenu_ss_regs {
301 u32 id_ver;
302 u32 synce_count; /* NU */
303 u32 synce_mux; /* NU */
304 u32 control; /* 2U */
305 u32 __rsvd_0[2]; /* 2U */
306 u32 rgmii_status; /* 2U */
307 u32 ss_status; /* 2U */
308};
309
310struct gbenu_switch_regs {
311 u32 id_ver;
312 u32 control;
313 u32 __rsvd_0[2];
314 u32 emcontrol;
315 u32 stat_port_en;
316 u32 ptype; /* NU */
317 u32 soft_idle;
318 u32 thru_rate; /* NU */
319 u32 gap_thresh; /* NU */
320 u32 tx_start_wds; /* NU */
321 u32 eee_prescale; /* 2U */
322 u32 tx_g_oflow_thresh_set; /* NU */
323 u32 tx_g_oflow_thresh_clr; /* NU */
324 u32 tx_g_buf_thresh_set_l; /* NU */
325 u32 tx_g_buf_thresh_set_h; /* NU */
326 u32 tx_g_buf_thresh_clr_l; /* NU */
327 u32 tx_g_buf_thresh_clr_h; /* NU */
328};
329
330struct gbenu_port_regs {
331 u32 __rsvd_0;
332 u32 control;
333 u32 max_blks; /* 2U */
334 u32 mem_align1;
335 u32 blk_cnt;
336 u32 port_vlan;
337 u32 tx_pri_map; /* NU */
338 u32 pri_ctl; /* 2U */
339 u32 rx_pri_map;
340 u32 rx_maxlen;
341 u32 tx_blks_pri; /* NU */
342 u32 __rsvd_1;
343 u32 idle2lpi; /* 2U */
344 u32 lpi2idle; /* 2U */
345 u32 eee_status; /* 2U */
346 u32 __rsvd_2;
347 u32 __rsvd_3[176]; /* NU: more to add */
348 u32 __rsvd_4[2];
349 u32 sa_lo;
350 u32 sa_hi;
351 u32 ts_ctl;
352 u32 ts_seq_ltype;
353 u32 ts_vlan;
354 u32 ts_ctl_ltype2;
355 u32 ts_ctl2;
356};
357
358struct gbenu_host_port_regs {
359 u32 __rsvd_0;
360 u32 control;
361 u32 flow_id_offset; /* 2U */
362 u32 __rsvd_1;
363 u32 blk_cnt;
364 u32 port_vlan;
365 u32 tx_pri_map; /* NU */
366 u32 pri_ctl;
367 u32 rx_pri_map;
368 u32 rx_maxlen;
369 u32 tx_blks_pri; /* NU */
370 u32 __rsvd_2;
371 u32 idle2lpi; /* 2U */
372 u32 lpi2wake; /* 2U */
373 u32 eee_status; /* 2U */
374 u32 __rsvd_3;
375 u32 __rsvd_4[184]; /* NU */
376 u32 host_blks_pri; /* NU */
377};
378
379struct gbenu_emac_regs {
380 u32 mac_control;
381 u32 mac_status;
382 u32 soft_reset;
383 u32 boff_test;
384 u32 rx_pause;
385 u32 __rsvd_0[11]; /* NU */
386 u32 tx_pause;
387 u32 __rsvd_1[11]; /* NU */
388 u32 em_control;
389 u32 tx_gap;
390};
391
392/* Some hw stat registers are applicable to the slave ports only; this is
393 * handled via the gbenu_et_stats table. Some others exist only on SS
394 * version NU and some only on 2U.
395 */
396struct gbenu_hw_stats {
397 u32 rx_good_frames;
398 u32 rx_broadcast_frames;
399 u32 rx_multicast_frames;
400 u32 rx_pause_frames; /* slave */
401 u32 rx_crc_errors;
402 u32 rx_align_code_errors; /* slave */
403 u32 rx_oversized_frames;
404 u32 rx_jabber_frames; /* slave */
405 u32 rx_undersized_frames;
406 u32 rx_fragments; /* slave */
407 u32 ale_drop;
408 u32 ale_overrun_drop;
409 u32 rx_bytes;
410 u32 tx_good_frames;
411 u32 tx_broadcast_frames;
412 u32 tx_multicast_frames;
413 u32 tx_pause_frames; /* slave */
414 u32 tx_deferred_frames; /* slave */
415 u32 tx_collision_frames; /* slave */
416 u32 tx_single_coll_frames; /* slave */
417 u32 tx_mult_coll_frames; /* slave */
418 u32 tx_excessive_collisions; /* slave */
419 u32 tx_late_collisions; /* slave */
420 u32 rx_ipg_error; /* slave 10G only */
421 u32 tx_carrier_sense_errors; /* slave */
422 u32 tx_bytes;
423 u32 tx_64B_frames;
424 u32 tx_65_to_127B_frames;
425 u32 tx_128_to_255B_frames;
426 u32 tx_256_to_511B_frames;
427 u32 tx_512_to_1023B_frames;
428 u32 tx_1024B_frames;
429 u32 net_bytes;
430 u32 rx_bottom_fifo_drop;
431 u32 rx_port_mask_drop;
432 u32 rx_top_fifo_drop;
433 u32 ale_rate_limit_drop;
434 u32 ale_vid_ingress_drop;
435 u32 ale_da_eq_sa_drop;
436 u32 __rsvd_0[3];
437 u32 ale_unknown_ucast;
438 u32 ale_unknown_ucast_bytes;
439 u32 ale_unknown_mcast;
440 u32 ale_unknown_mcast_bytes;
441 u32 ale_unknown_bcast;
442 u32 ale_unknown_bcast_bytes;
443 u32 ale_pol_match;
444 u32 ale_pol_match_red; /* NU */
445 u32 ale_pol_match_yellow; /* NU */
446 u32 __rsvd_1[44];
447 u32 tx_mem_protect_err;
448 /* following NU only */
449 u32 tx_pri0;
450 u32 tx_pri1;
451 u32 tx_pri2;
452 u32 tx_pri3;
453 u32 tx_pri4;
454 u32 tx_pri5;
455 u32 tx_pri6;
456 u32 tx_pri7;
457 u32 tx_pri0_bcnt;
458 u32 tx_pri1_bcnt;
459 u32 tx_pri2_bcnt;
460 u32 tx_pri3_bcnt;
461 u32 tx_pri4_bcnt;
462 u32 tx_pri5_bcnt;
463 u32 tx_pri6_bcnt;
464 u32 tx_pri7_bcnt;
465 u32 tx_pri0_drop;
466 u32 tx_pri1_drop;
467 u32 tx_pri2_drop;
468 u32 tx_pri3_drop;
469 u32 tx_pri4_drop;
470 u32 tx_pri5_drop;
471 u32 tx_pri6_drop;
472 u32 tx_pri7_drop;
473 u32 tx_pri0_drop_bcnt;
474 u32 tx_pri1_drop_bcnt;
475 u32 tx_pri2_drop_bcnt;
476 u32 tx_pri3_drop_bcnt;
477 u32 tx_pri4_drop_bcnt;
478 u32 tx_pri5_drop_bcnt;
479 u32 tx_pri6_drop_bcnt;
480 u32 tx_pri7_drop_bcnt;
481};
482
483#define GBENU_NUM_HW_STAT_ENTRIES (sizeof(struct gbenu_hw_stats) / sizeof(u32))
484#define GBENU_HW_STATS_REG_MAP_SZ 0x200
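/* Each GBENU stats module occupies a GBENU_HW_STATS_REG_MAP_SZ register
 * window; the entry count is derived from struct gbenu_hw_stats itself.
 */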
485
486struct gbe_ss_regs {
487 u32 id_ver;
488 u32 synce_count;
489 u32 synce_mux;
490};
491
492struct gbe_ss_regs_ofs {
493 u16 id_ver;
494 u16 control;
495};
496
497struct gbe_switch_regs {
498 u32 id_ver;
499 u32 control;
500 u32 soft_reset;
501 u32 stat_port_en;
502 u32 ptype;
503 u32 soft_idle;
504 u32 thru_rate;
505 u32 gap_thresh;
506 u32 tx_start_wds;
507 u32 flow_control;
508};
509
510struct gbe_switch_regs_ofs {
511 u16 id_ver;
512 u16 control;
513 u16 soft_reset;
514 u16 emcontrol;
515 u16 stat_port_en;
516 u16 ptype;
517 u16 flow_control;
518};
519
520struct gbe_port_regs {
521 u32 max_blks;
522 u32 blk_cnt;
523 u32 port_vlan;
524 u32 tx_pri_map;
525 u32 sa_lo;
526 u32 sa_hi;
527 u32 ts_ctl;
528 u32 ts_seq_ltype;
529 u32 ts_vlan;
530 u32 ts_ctl_ltype2;
531 u32 ts_ctl2;
532};
533
534struct gbe_port_regs_ofs {
535 u16 port_vlan;
536 u16 tx_pri_map;
537 u16 sa_lo;
538 u16 sa_hi;
539 u16 ts_ctl;
540 u16 ts_seq_ltype;
541 u16 ts_vlan;
542 u16 ts_ctl_ltype2;
543 u16 ts_ctl2;
544 u16 rx_maxlen; /* 2U, NU */
545};
546
547struct gbe_host_port_regs {
548 u32 src_id;
549 u32 port_vlan;
550 u32 rx_pri_map;
551 u32 rx_maxlen;
552};
553
554struct gbe_host_port_regs_ofs {
555 u16 port_vlan;
556 u16 tx_pri_map;
557 u16 rx_maxlen;
558};
559
560struct gbe_emac_regs {
561 u32 id_ver;
562 u32 mac_control;
563 u32 mac_status;
564 u32 soft_reset;
565 u32 rx_maxlen;
566 u32 __reserved_0;
567 u32 rx_pause;
568 u32 tx_pause;
569 u32 __reserved_1;
570 u32 rx_pri_map;
571 u32 rsvd[6];
572};
573
574struct gbe_emac_regs_ofs {
575 u16 mac_control;
576 u16 soft_reset;
577 u16 rx_maxlen;
578};
579
580struct gbe_hw_stats {
581 u32 rx_good_frames;
582 u32 rx_broadcast_frames;
583 u32 rx_multicast_frames;
584 u32 rx_pause_frames;
585 u32 rx_crc_errors;
586 u32 rx_align_code_errors;
587 u32 rx_oversized_frames;
588 u32 rx_jabber_frames;
589 u32 rx_undersized_frames;
590 u32 rx_fragments;
591 u32 __pad_0[2];
592 u32 rx_bytes;
593 u32 tx_good_frames;
594 u32 tx_broadcast_frames;
595 u32 tx_multicast_frames;
596 u32 tx_pause_frames;
597 u32 tx_deferred_frames;
598 u32 tx_collision_frames;
599 u32 tx_single_coll_frames;
600 u32 tx_mult_coll_frames;
601 u32 tx_excessive_collisions;
602 u32 tx_late_collisions;
603 u32 tx_underrun;
604 u32 tx_carrier_sense_errors;
605 u32 tx_bytes;
606 u32 tx_64byte_frames;
607 u32 tx_65_to_127byte_frames;
608 u32 tx_128_to_255byte_frames;
609 u32 tx_256_to_511byte_frames;
610 u32 tx_512_to_1023byte_frames;
611 u32 tx_1024byte_frames;
612 u32 net_bytes;
613 u32 rx_sof_overruns;
614 u32 rx_mof_overruns;
615 u32 rx_dma_overruns;
616};
617
618#define GBE13_NUM_HW_STAT_ENTRIES (sizeof(struct gbe_hw_stats)/sizeof(u32))
619#define GBE_MAX_HW_STAT_MODS 9
620#define GBE_HW_STATS_REG_MAP_SZ 0x100
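/* hw_stats_regs[] in struct gbe_priv holds one mapping per stats module;
 * nine slots cover the GBENU host module plus up to eight slave ports.
 */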
621
622struct gbe_slave {
623 void __iomem *port_regs;
624 void __iomem *emac_regs;
625 struct gbe_port_regs_ofs port_regs_ofs;
626 struct gbe_emac_regs_ofs emac_regs_ofs;
627 int slave_num; /* 0 based logical number */
628 int port_num; /* actual port number */
629 atomic_t link_state;
630 bool open;
631 struct phy_device *phy;
632 u32 link_interface;
633 u32 mac_control;
634 u8 phy_port_t;
635 struct device_node *phy_node;
636 struct list_head slave_list;
637};
638
639struct gbe_priv {
640 struct device *dev;
641 struct netcp_device *netcp_device;
642 struct timer_list timer;
643 u32 num_slaves;
644 u32 ale_entries;
645 u32 ale_ports;
646 bool enable_ale;
647 u8 max_num_slaves;
648 u8 max_num_ports; /* max_num_slaves + 1 */
649 struct netcp_tx_pipe tx_pipe;
650
651 int host_port;
652 u32 rx_packet_max;
653 u32 ss_version;
654 u32 stats_en_mask;
655
656 void __iomem *ss_regs;
657 void __iomem *switch_regs;
658 void __iomem *host_port_regs;
659 void __iomem *ale_reg;
660 void __iomem *sgmii_port_regs;
661 void __iomem *sgmii_port34_regs;
662 void __iomem *xgbe_serdes_regs;
663 void __iomem *hw_stats_regs[GBE_MAX_HW_STAT_MODS];
664
665 struct gbe_ss_regs_ofs ss_regs_ofs;
666 struct gbe_switch_regs_ofs switch_regs_ofs;
667 struct gbe_host_port_regs_ofs host_port_regs_ofs;
668
669 struct cpsw_ale *ale;
670 unsigned int tx_queue_id;
671 const char *dma_chan_name;
672
673 struct list_head gbe_intf_head;
674 struct list_head secondary_slaves;
675 struct net_device *dummy_ndev;
676
677 u64 *hw_stats;
678 const struct netcp_ethtool_stat *et_stats;
679 int num_et_stats;
680 /* Lock for updating the hwstats */
681 spinlock_t hw_stats_lock;
682};
683
684struct gbe_intf {
685 struct net_device *ndev;
686 struct device *dev;
687 struct gbe_priv *gbe_dev;
688 struct netcp_tx_pipe tx_pipe;
689 struct gbe_slave *slave;
690 struct list_head gbe_intf_list;
691 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
692};
693
694static struct netcp_module gbe_module;
695static struct netcp_module xgbe_module;
696
697/* Statistic management */
698struct netcp_ethtool_stat {
699 char desc[ETH_GSTRING_LEN];
700 int type;
701 u32 size;
702 int offset;
703};
704
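/* The stats macros below build ethtool descriptors: a display string, the
 * hardware stats module the counter belongs to, and the size and offset of
 * the counter within that module's register map.
 */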
705#define GBE_STATSA_INFO(field) \
706{ \
707 "GBE_A:"#field, GBE_STATSA_MODULE, \
708 FIELD_SIZEOF(struct gbe_hw_stats, field), \
709 offsetof(struct gbe_hw_stats, field) \
710}
711
712#define GBE_STATSB_INFO(field) \
713{ \
714 "GBE_B:"#field, GBE_STATSB_MODULE, \
715 FIELD_SIZEOF(struct gbe_hw_stats, field), \
716 offsetof(struct gbe_hw_stats, field) \
717}
718
719#define GBE_STATSC_INFO(field) \
720{ \
721 "GBE_C:"#field, GBE_STATSC_MODULE, \
722 FIELD_SIZEOF(struct gbe_hw_stats, field), \
723 offsetof(struct gbe_hw_stats, field) \
724}
725
726#define GBE_STATSD_INFO(field) \
727{ \
728 "GBE_D:"#field, GBE_STATSD_MODULE, \
729 FIELD_SIZEOF(struct gbe_hw_stats, field), \
730 offsetof(struct gbe_hw_stats, field) \
731}
732
733static const struct netcp_ethtool_stat gbe13_et_stats[] = {
734 /* GBE module A */
735 GBE_STATSA_INFO(rx_good_frames),
736 GBE_STATSA_INFO(rx_broadcast_frames),
737 GBE_STATSA_INFO(rx_multicast_frames),
738 GBE_STATSA_INFO(rx_pause_frames),
739 GBE_STATSA_INFO(rx_crc_errors),
740 GBE_STATSA_INFO(rx_align_code_errors),
741 GBE_STATSA_INFO(rx_oversized_frames),
742 GBE_STATSA_INFO(rx_jabber_frames),
743 GBE_STATSA_INFO(rx_undersized_frames),
744 GBE_STATSA_INFO(rx_fragments),
745 GBE_STATSA_INFO(rx_bytes),
746 GBE_STATSA_INFO(tx_good_frames),
747 GBE_STATSA_INFO(tx_broadcast_frames),
748 GBE_STATSA_INFO(tx_multicast_frames),
749 GBE_STATSA_INFO(tx_pause_frames),
750 GBE_STATSA_INFO(tx_deferred_frames),
751 GBE_STATSA_INFO(tx_collision_frames),
752 GBE_STATSA_INFO(tx_single_coll_frames),
753 GBE_STATSA_INFO(tx_mult_coll_frames),
754 GBE_STATSA_INFO(tx_excessive_collisions),
755 GBE_STATSA_INFO(tx_late_collisions),
756 GBE_STATSA_INFO(tx_underrun),
757 GBE_STATSA_INFO(tx_carrier_sense_errors),
758 GBE_STATSA_INFO(tx_bytes),
759 GBE_STATSA_INFO(tx_64byte_frames),
760 GBE_STATSA_INFO(tx_65_to_127byte_frames),
761 GBE_STATSA_INFO(tx_128_to_255byte_frames),
762 GBE_STATSA_INFO(tx_256_to_511byte_frames),
763 GBE_STATSA_INFO(tx_512_to_1023byte_frames),
764 GBE_STATSA_INFO(tx_1024byte_frames),
765 GBE_STATSA_INFO(net_bytes),
766 GBE_STATSA_INFO(rx_sof_overruns),
767 GBE_STATSA_INFO(rx_mof_overruns),
768 GBE_STATSA_INFO(rx_dma_overruns),
769 /* GBE module B */
770 GBE_STATSB_INFO(rx_good_frames),
771 GBE_STATSB_INFO(rx_broadcast_frames),
772 GBE_STATSB_INFO(rx_multicast_frames),
773 GBE_STATSB_INFO(rx_pause_frames),
774 GBE_STATSB_INFO(rx_crc_errors),
775 GBE_STATSB_INFO(rx_align_code_errors),
776 GBE_STATSB_INFO(rx_oversized_frames),
777 GBE_STATSB_INFO(rx_jabber_frames),
778 GBE_STATSB_INFO(rx_undersized_frames),
779 GBE_STATSB_INFO(rx_fragments),
780 GBE_STATSB_INFO(rx_bytes),
781 GBE_STATSB_INFO(tx_good_frames),
782 GBE_STATSB_INFO(tx_broadcast_frames),
783 GBE_STATSB_INFO(tx_multicast_frames),
784 GBE_STATSB_INFO(tx_pause_frames),
785 GBE_STATSB_INFO(tx_deferred_frames),
786 GBE_STATSB_INFO(tx_collision_frames),
787 GBE_STATSB_INFO(tx_single_coll_frames),
788 GBE_STATSB_INFO(tx_mult_coll_frames),
789 GBE_STATSB_INFO(tx_excessive_collisions),
790 GBE_STATSB_INFO(tx_late_collisions),
791 GBE_STATSB_INFO(tx_underrun),
792 GBE_STATSB_INFO(tx_carrier_sense_errors),
793 GBE_STATSB_INFO(tx_bytes),
794 GBE_STATSB_INFO(tx_64byte_frames),
795 GBE_STATSB_INFO(tx_65_to_127byte_frames),
796 GBE_STATSB_INFO(tx_128_to_255byte_frames),
797 GBE_STATSB_INFO(tx_256_to_511byte_frames),
798 GBE_STATSB_INFO(tx_512_to_1023byte_frames),
799 GBE_STATSB_INFO(tx_1024byte_frames),
800 GBE_STATSB_INFO(net_bytes),
801 GBE_STATSB_INFO(rx_sof_overruns),
802 GBE_STATSB_INFO(rx_mof_overruns),
803 GBE_STATSB_INFO(rx_dma_overruns),
804 /* GBE module C */
805 GBE_STATSC_INFO(rx_good_frames),
806 GBE_STATSC_INFO(rx_broadcast_frames),
807 GBE_STATSC_INFO(rx_multicast_frames),
808 GBE_STATSC_INFO(rx_pause_frames),
809 GBE_STATSC_INFO(rx_crc_errors),
810 GBE_STATSC_INFO(rx_align_code_errors),
811 GBE_STATSC_INFO(rx_oversized_frames),
812 GBE_STATSC_INFO(rx_jabber_frames),
813 GBE_STATSC_INFO(rx_undersized_frames),
814 GBE_STATSC_INFO(rx_fragments),
815 GBE_STATSC_INFO(rx_bytes),
816 GBE_STATSC_INFO(tx_good_frames),
817 GBE_STATSC_INFO(tx_broadcast_frames),
818 GBE_STATSC_INFO(tx_multicast_frames),
819 GBE_STATSC_INFO(tx_pause_frames),
820 GBE_STATSC_INFO(tx_deferred_frames),
821 GBE_STATSC_INFO(tx_collision_frames),
822 GBE_STATSC_INFO(tx_single_coll_frames),
823 GBE_STATSC_INFO(tx_mult_coll_frames),
824 GBE_STATSC_INFO(tx_excessive_collisions),
825 GBE_STATSC_INFO(tx_late_collisions),
826 GBE_STATSC_INFO(tx_underrun),
827 GBE_STATSC_INFO(tx_carrier_sense_errors),
828 GBE_STATSC_INFO(tx_bytes),
829 GBE_STATSC_INFO(tx_64byte_frames),
830 GBE_STATSC_INFO(tx_65_to_127byte_frames),
831 GBE_STATSC_INFO(tx_128_to_255byte_frames),
832 GBE_STATSC_INFO(tx_256_to_511byte_frames),
833 GBE_STATSC_INFO(tx_512_to_1023byte_frames),
834 GBE_STATSC_INFO(tx_1024byte_frames),
835 GBE_STATSC_INFO(net_bytes),
836 GBE_STATSC_INFO(rx_sof_overruns),
837 GBE_STATSC_INFO(rx_mof_overruns),
838 GBE_STATSC_INFO(rx_dma_overruns),
839 /* GBE module D */
840 GBE_STATSD_INFO(rx_good_frames),
841 GBE_STATSD_INFO(rx_broadcast_frames),
842 GBE_STATSD_INFO(rx_multicast_frames),
843 GBE_STATSD_INFO(rx_pause_frames),
844 GBE_STATSD_INFO(rx_crc_errors),
845 GBE_STATSD_INFO(rx_align_code_errors),
846 GBE_STATSD_INFO(rx_oversized_frames),
847 GBE_STATSD_INFO(rx_jabber_frames),
848 GBE_STATSD_INFO(rx_undersized_frames),
849 GBE_STATSD_INFO(rx_fragments),
850 GBE_STATSD_INFO(rx_bytes),
851 GBE_STATSD_INFO(tx_good_frames),
852 GBE_STATSD_INFO(tx_broadcast_frames),
853 GBE_STATSD_INFO(tx_multicast_frames),
854 GBE_STATSD_INFO(tx_pause_frames),
855 GBE_STATSD_INFO(tx_deferred_frames),
856 GBE_STATSD_INFO(tx_collision_frames),
857 GBE_STATSD_INFO(tx_single_coll_frames),
858 GBE_STATSD_INFO(tx_mult_coll_frames),
859 GBE_STATSD_INFO(tx_excessive_collisions),
860 GBE_STATSD_INFO(tx_late_collisions),
861 GBE_STATSD_INFO(tx_underrun),
862 GBE_STATSD_INFO(tx_carrier_sense_errors),
863 GBE_STATSD_INFO(tx_bytes),
864 GBE_STATSD_INFO(tx_64byte_frames),
865 GBE_STATSD_INFO(tx_65_to_127byte_frames),
866 GBE_STATSD_INFO(tx_128_to_255byte_frames),
867 GBE_STATSD_INFO(tx_256_to_511byte_frames),
868 GBE_STATSD_INFO(tx_512_to_1023byte_frames),
869 GBE_STATSD_INFO(tx_1024byte_frames),
870 GBE_STATSD_INFO(net_bytes),
871 GBE_STATSD_INFO(rx_sof_overruns),
872 GBE_STATSD_INFO(rx_mof_overruns),
873 GBE_STATSD_INFO(rx_dma_overruns),
874};
875
876/* This is the size of entries in GBENU_STATS_HOST */
877#define GBENU_ET_STATS_HOST_SIZE 33
878
879#define GBENU_STATS_HOST(field) \
880{ \
881 "GBE_HOST:"#field, GBENU_STATS0_MODULE, \
882 FIELD_SIZEOF(struct gbenu_hw_stats, field), \
883 offsetof(struct gbenu_hw_stats, field) \
884}
885
886/* This is the size of per-slave-port entries (GBENU_STATS_Px) */
887#define GBENU_ET_STATS_PORT_SIZE 46
888
889#define GBENU_STATS_P1(field) \
890{ \
891 "GBE_P1:"#field, GBENU_STATS1_MODULE, \
892 FIELD_SIZEOF(struct gbenu_hw_stats, field), \
893 offsetof(struct gbenu_hw_stats, field) \
894}
895
896#define GBENU_STATS_P2(field) \
897{ \
898 "GBE_P2:"#field, GBENU_STATS2_MODULE, \
899 FIELD_SIZEOF(struct gbenu_hw_stats, field), \
900 offsetof(struct gbenu_hw_stats, field) \
901}
902
903#define GBENU_STATS_P3(field) \
904{ \
905 "GBE_P3:"#field, GBENU_STATS3_MODULE, \
906 FIELD_SIZEOF(struct gbenu_hw_stats, field), \
907 offsetof(struct gbenu_hw_stats, field) \
908}
909
910#define GBENU_STATS_P4(field) \
911{ \
912 "GBE_P4:"#field, GBENU_STATS4_MODULE, \
913 FIELD_SIZEOF(struct gbenu_hw_stats, field), \
914 offsetof(struct gbenu_hw_stats, field) \
915}
916
917#define GBENU_STATS_P5(field) \
918{ \
919 "GBE_P5:"#field, GBENU_STATS5_MODULE, \
920 FIELD_SIZEOF(struct gbenu_hw_stats, field), \
921 offsetof(struct gbenu_hw_stats, field) \
922}
923
924#define GBENU_STATS_P6(field) \
925{ \
926 "GBE_P6:"#field, GBENU_STATS6_MODULE, \
927 FIELD_SIZEOF(struct gbenu_hw_stats, field), \
928 offsetof(struct gbenu_hw_stats, field) \
929}
930
931#define GBENU_STATS_P7(field) \
932{ \
933 "GBE_P7:"#field, GBENU_STATS7_MODULE, \
934 FIELD_SIZEOF(struct gbenu_hw_stats, field), \
935 offsetof(struct gbenu_hw_stats, field) \
936}
937
938#define GBENU_STATS_P8(field) \
939{ \
940 "GBE_P8:"#field, GBENU_STATS8_MODULE, \
941 FIELD_SIZEOF(struct gbenu_hw_stats, field), \
942 offsetof(struct gbenu_hw_stats, field) \
943}
944
945static const struct netcp_ethtool_stat gbenu_et_stats[] = {
946 /* GBENU Host Module */
947 GBENU_STATS_HOST(rx_good_frames),
948 GBENU_STATS_HOST(rx_broadcast_frames),
949 GBENU_STATS_HOST(rx_multicast_frames),
950 GBENU_STATS_HOST(rx_crc_errors),
951 GBENU_STATS_HOST(rx_oversized_frames),
952 GBENU_STATS_HOST(rx_undersized_frames),
953 GBENU_STATS_HOST(ale_drop),
954 GBENU_STATS_HOST(ale_overrun_drop),
955 GBENU_STATS_HOST(rx_bytes),
956 GBENU_STATS_HOST(tx_good_frames),
957 GBENU_STATS_HOST(tx_broadcast_frames),
958 GBENU_STATS_HOST(tx_multicast_frames),
959 GBENU_STATS_HOST(tx_bytes),
960 GBENU_STATS_HOST(tx_64B_frames),
961 GBENU_STATS_HOST(tx_65_to_127B_frames),
962 GBENU_STATS_HOST(tx_128_to_255B_frames),
963 GBENU_STATS_HOST(tx_256_to_511B_frames),
964 GBENU_STATS_HOST(tx_512_to_1023B_frames),
965 GBENU_STATS_HOST(tx_1024B_frames),
966 GBENU_STATS_HOST(net_bytes),
967 GBENU_STATS_HOST(rx_bottom_fifo_drop),
968 GBENU_STATS_HOST(rx_port_mask_drop),
969 GBENU_STATS_HOST(rx_top_fifo_drop),
970 GBENU_STATS_HOST(ale_rate_limit_drop),
971 GBENU_STATS_HOST(ale_vid_ingress_drop),
972 GBENU_STATS_HOST(ale_da_eq_sa_drop),
973 GBENU_STATS_HOST(ale_unknown_ucast),
974 GBENU_STATS_HOST(ale_unknown_ucast_bytes),
975 GBENU_STATS_HOST(ale_unknown_mcast),
976 GBENU_STATS_HOST(ale_unknown_mcast_bytes),
977 GBENU_STATS_HOST(ale_unknown_bcast),
978 GBENU_STATS_HOST(ale_unknown_bcast_bytes),
979 GBENU_STATS_HOST(tx_mem_protect_err),
980 /* GBENU Module 1 */
981 GBENU_STATS_P1(rx_good_frames),
982 GBENU_STATS_P1(rx_broadcast_frames),
983 GBENU_STATS_P1(rx_multicast_frames),
984 GBENU_STATS_P1(rx_pause_frames),
985 GBENU_STATS_P1(rx_crc_errors),
986 GBENU_STATS_P1(rx_align_code_errors),
987 GBENU_STATS_P1(rx_oversized_frames),
988 GBENU_STATS_P1(rx_jabber_frames),
989 GBENU_STATS_P1(rx_undersized_frames),
990 GBENU_STATS_P1(rx_fragments),
991 GBENU_STATS_P1(ale_drop),
992 GBENU_STATS_P1(ale_overrun_drop),
993 GBENU_STATS_P1(rx_bytes),
994 GBENU_STATS_P1(tx_good_frames),
995 GBENU_STATS_P1(tx_broadcast_frames),
996 GBENU_STATS_P1(tx_multicast_frames),
997 GBENU_STATS_P1(tx_pause_frames),
998 GBENU_STATS_P1(tx_deferred_frames),
999 GBENU_STATS_P1(tx_collision_frames),
1000 GBENU_STATS_P1(tx_single_coll_frames),
1001 GBENU_STATS_P1(tx_mult_coll_frames),
1002 GBENU_STATS_P1(tx_excessive_collisions),
1003 GBENU_STATS_P1(tx_late_collisions),
1004 GBENU_STATS_P1(rx_ipg_error),
1005 GBENU_STATS_P1(tx_carrier_sense_errors),
1006 GBENU_STATS_P1(tx_bytes),
1007 GBENU_STATS_P1(tx_64B_frames),
1008 GBENU_STATS_P1(tx_65_to_127B_frames),
1009 GBENU_STATS_P1(tx_128_to_255B_frames),
1010 GBENU_STATS_P1(tx_256_to_511B_frames),
1011 GBENU_STATS_P1(tx_512_to_1023B_frames),
1012 GBENU_STATS_P1(tx_1024B_frames),
1013 GBENU_STATS_P1(net_bytes),
1014 GBENU_STATS_P1(rx_bottom_fifo_drop),
1015 GBENU_STATS_P1(rx_port_mask_drop),
1016 GBENU_STATS_P1(rx_top_fifo_drop),
1017 GBENU_STATS_P1(ale_rate_limit_drop),
1018 GBENU_STATS_P1(ale_vid_ingress_drop),
1019 GBENU_STATS_P1(ale_da_eq_sa_drop),
1020 GBENU_STATS_P1(ale_unknown_ucast),
1021 GBENU_STATS_P1(ale_unknown_ucast_bytes),
1022 GBENU_STATS_P1(ale_unknown_mcast),
1023 GBENU_STATS_P1(ale_unknown_mcast_bytes),
1024 GBENU_STATS_P1(ale_unknown_bcast),
1025 GBENU_STATS_P1(ale_unknown_bcast_bytes),
1026 GBENU_STATS_P1(tx_mem_protect_err),
1027 /* GBENU Module 2 */
1028 GBENU_STATS_P2(rx_good_frames),
1029 GBENU_STATS_P2(rx_broadcast_frames),
1030 GBENU_STATS_P2(rx_multicast_frames),
1031 GBENU_STATS_P2(rx_pause_frames),
1032 GBENU_STATS_P2(rx_crc_errors),
1033 GBENU_STATS_P2(rx_align_code_errors),
1034 GBENU_STATS_P2(rx_oversized_frames),
1035 GBENU_STATS_P2(rx_jabber_frames),
1036 GBENU_STATS_P2(rx_undersized_frames),
1037 GBENU_STATS_P2(rx_fragments),
1038 GBENU_STATS_P2(ale_drop),
1039 GBENU_STATS_P2(ale_overrun_drop),
1040 GBENU_STATS_P2(rx_bytes),
1041 GBENU_STATS_P2(tx_good_frames),
1042 GBENU_STATS_P2(tx_broadcast_frames),
1043 GBENU_STATS_P2(tx_multicast_frames),
1044 GBENU_STATS_P2(tx_pause_frames),
1045 GBENU_STATS_P2(tx_deferred_frames),
1046 GBENU_STATS_P2(tx_collision_frames),
1047 GBENU_STATS_P2(tx_single_coll_frames),
1048 GBENU_STATS_P2(tx_mult_coll_frames),
1049 GBENU_STATS_P2(tx_excessive_collisions),
1050 GBENU_STATS_P2(tx_late_collisions),
1051 GBENU_STATS_P2(rx_ipg_error),
1052 GBENU_STATS_P2(tx_carrier_sense_errors),
1053 GBENU_STATS_P2(tx_bytes),
1054 GBENU_STATS_P2(tx_64B_frames),
1055 GBENU_STATS_P2(tx_65_to_127B_frames),
1056 GBENU_STATS_P2(tx_128_to_255B_frames),
1057 GBENU_STATS_P2(tx_256_to_511B_frames),
1058 GBENU_STATS_P2(tx_512_to_1023B_frames),
1059 GBENU_STATS_P2(tx_1024B_frames),
1060 GBENU_STATS_P2(net_bytes),
1061 GBENU_STATS_P2(rx_bottom_fifo_drop),
1062 GBENU_STATS_P2(rx_port_mask_drop),
1063 GBENU_STATS_P2(rx_top_fifo_drop),
1064 GBENU_STATS_P2(ale_rate_limit_drop),
1065 GBENU_STATS_P2(ale_vid_ingress_drop),
1066 GBENU_STATS_P2(ale_da_eq_sa_drop),
1067 GBENU_STATS_P2(ale_unknown_ucast),
1068 GBENU_STATS_P2(ale_unknown_ucast_bytes),
1069 GBENU_STATS_P2(ale_unknown_mcast),
1070 GBENU_STATS_P2(ale_unknown_mcast_bytes),
1071 GBENU_STATS_P2(ale_unknown_bcast),
1072 GBENU_STATS_P2(ale_unknown_bcast_bytes),
1073 GBENU_STATS_P2(tx_mem_protect_err),
1074 /* GBENU Module 3 */
1075 GBENU_STATS_P3(rx_good_frames),
1076 GBENU_STATS_P3(rx_broadcast_frames),
1077 GBENU_STATS_P3(rx_multicast_frames),
1078 GBENU_STATS_P3(rx_pause_frames),
1079 GBENU_STATS_P3(rx_crc_errors),
1080 GBENU_STATS_P3(rx_align_code_errors),
1081 GBENU_STATS_P3(rx_oversized_frames),
1082 GBENU_STATS_P3(rx_jabber_frames),
1083 GBENU_STATS_P3(rx_undersized_frames),
1084 GBENU_STATS_P3(rx_fragments),
1085 GBENU_STATS_P3(ale_drop),
1086 GBENU_STATS_P3(ale_overrun_drop),
1087 GBENU_STATS_P3(rx_bytes),
1088 GBENU_STATS_P3(tx_good_frames),
1089 GBENU_STATS_P3(tx_broadcast_frames),
1090 GBENU_STATS_P3(tx_multicast_frames),
1091 GBENU_STATS_P3(tx_pause_frames),
1092 GBENU_STATS_P3(tx_deferred_frames),
1093 GBENU_STATS_P3(tx_collision_frames),
1094 GBENU_STATS_P3(tx_single_coll_frames),
1095 GBENU_STATS_P3(tx_mult_coll_frames),
1096 GBENU_STATS_P3(tx_excessive_collisions),
1097 GBENU_STATS_P3(tx_late_collisions),
1098 GBENU_STATS_P3(rx_ipg_error),
1099 GBENU_STATS_P3(tx_carrier_sense_errors),
1100 GBENU_STATS_P3(tx_bytes),
1101 GBENU_STATS_P3(tx_64B_frames),
1102 GBENU_STATS_P3(tx_65_to_127B_frames),
1103 GBENU_STATS_P3(tx_128_to_255B_frames),
1104 GBENU_STATS_P3(tx_256_to_511B_frames),
1105 GBENU_STATS_P3(tx_512_to_1023B_frames),
1106 GBENU_STATS_P3(tx_1024B_frames),
1107 GBENU_STATS_P3(net_bytes),
1108 GBENU_STATS_P3(rx_bottom_fifo_drop),
1109 GBENU_STATS_P3(rx_port_mask_drop),
1110 GBENU_STATS_P3(rx_top_fifo_drop),
1111 GBENU_STATS_P3(ale_rate_limit_drop),
1112 GBENU_STATS_P3(ale_vid_ingress_drop),
1113 GBENU_STATS_P3(ale_da_eq_sa_drop),
1114 GBENU_STATS_P3(ale_unknown_ucast),
1115 GBENU_STATS_P3(ale_unknown_ucast_bytes),
1116 GBENU_STATS_P3(ale_unknown_mcast),
1117 GBENU_STATS_P3(ale_unknown_mcast_bytes),
1118 GBENU_STATS_P3(ale_unknown_bcast),
1119 GBENU_STATS_P3(ale_unknown_bcast_bytes),
1120 GBENU_STATS_P3(tx_mem_protect_err),
1121 /* GBENU Module 4 */
1122 GBENU_STATS_P4(rx_good_frames),
1123 GBENU_STATS_P4(rx_broadcast_frames),
1124 GBENU_STATS_P4(rx_multicast_frames),
1125 GBENU_STATS_P4(rx_pause_frames),
1126 GBENU_STATS_P4(rx_crc_errors),
1127 GBENU_STATS_P4(rx_align_code_errors),
1128 GBENU_STATS_P4(rx_oversized_frames),
1129 GBENU_STATS_P4(rx_jabber_frames),
1130 GBENU_STATS_P4(rx_undersized_frames),
1131 GBENU_STATS_P4(rx_fragments),
1132 GBENU_STATS_P4(ale_drop),
1133 GBENU_STATS_P4(ale_overrun_drop),
1134 GBENU_STATS_P4(rx_bytes),
1135 GBENU_STATS_P4(tx_good_frames),
1136 GBENU_STATS_P4(tx_broadcast_frames),
1137 GBENU_STATS_P4(tx_multicast_frames),
1138 GBENU_STATS_P4(tx_pause_frames),
1139 GBENU_STATS_P4(tx_deferred_frames),
1140 GBENU_STATS_P4(tx_collision_frames),
1141 GBENU_STATS_P4(tx_single_coll_frames),
1142 GBENU_STATS_P4(tx_mult_coll_frames),
1143 GBENU_STATS_P4(tx_excessive_collisions),
1144 GBENU_STATS_P4(tx_late_collisions),
1145 GBENU_STATS_P4(rx_ipg_error),
1146 GBENU_STATS_P4(tx_carrier_sense_errors),
1147 GBENU_STATS_P4(tx_bytes),
1148 GBENU_STATS_P4(tx_64B_frames),
1149 GBENU_STATS_P4(tx_65_to_127B_frames),
1150 GBENU_STATS_P4(tx_128_to_255B_frames),
1151 GBENU_STATS_P4(tx_256_to_511B_frames),
1152 GBENU_STATS_P4(tx_512_to_1023B_frames),
1153 GBENU_STATS_P4(tx_1024B_frames),
1154 GBENU_STATS_P4(net_bytes),
1155 GBENU_STATS_P4(rx_bottom_fifo_drop),
1156 GBENU_STATS_P4(rx_port_mask_drop),
1157 GBENU_STATS_P4(rx_top_fifo_drop),
1158 GBENU_STATS_P4(ale_rate_limit_drop),
1159 GBENU_STATS_P4(ale_vid_ingress_drop),
1160 GBENU_STATS_P4(ale_da_eq_sa_drop),
1161 GBENU_STATS_P4(ale_unknown_ucast),
1162 GBENU_STATS_P4(ale_unknown_ucast_bytes),
1163 GBENU_STATS_P4(ale_unknown_mcast),
1164 GBENU_STATS_P4(ale_unknown_mcast_bytes),
1165 GBENU_STATS_P4(ale_unknown_bcast),
1166 GBENU_STATS_P4(ale_unknown_bcast_bytes),
1167 GBENU_STATS_P4(tx_mem_protect_err),
1168 /* GBENU Module 5 */
1169 GBENU_STATS_P5(rx_good_frames),
1170 GBENU_STATS_P5(rx_broadcast_frames),
1171 GBENU_STATS_P5(rx_multicast_frames),
1172 GBENU_STATS_P5(rx_pause_frames),
1173 GBENU_STATS_P5(rx_crc_errors),
1174 GBENU_STATS_P5(rx_align_code_errors),
1175 GBENU_STATS_P5(rx_oversized_frames),
1176 GBENU_STATS_P5(rx_jabber_frames),
1177 GBENU_STATS_P5(rx_undersized_frames),
1178 GBENU_STATS_P5(rx_fragments),
1179 GBENU_STATS_P5(ale_drop),
1180 GBENU_STATS_P5(ale_overrun_drop),
1181 GBENU_STATS_P5(rx_bytes),
1182 GBENU_STATS_P5(tx_good_frames),
1183 GBENU_STATS_P5(tx_broadcast_frames),
1184 GBENU_STATS_P5(tx_multicast_frames),
1185 GBENU_STATS_P5(tx_pause_frames),
1186 GBENU_STATS_P5(tx_deferred_frames),
1187 GBENU_STATS_P5(tx_collision_frames),
1188 GBENU_STATS_P5(tx_single_coll_frames),
1189 GBENU_STATS_P5(tx_mult_coll_frames),
1190 GBENU_STATS_P5(tx_excessive_collisions),
1191 GBENU_STATS_P5(tx_late_collisions),
1192 GBENU_STATS_P5(rx_ipg_error),
1193 GBENU_STATS_P5(tx_carrier_sense_errors),
1194 GBENU_STATS_P5(tx_bytes),
1195 GBENU_STATS_P5(tx_64B_frames),
1196 GBENU_STATS_P5(tx_65_to_127B_frames),
1197 GBENU_STATS_P5(tx_128_to_255B_frames),
1198 GBENU_STATS_P5(tx_256_to_511B_frames),
1199 GBENU_STATS_P5(tx_512_to_1023B_frames),
1200 GBENU_STATS_P5(tx_1024B_frames),
1201 GBENU_STATS_P5(net_bytes),
1202 GBENU_STATS_P5(rx_bottom_fifo_drop),
1203 GBENU_STATS_P5(rx_port_mask_drop),
1204 GBENU_STATS_P5(rx_top_fifo_drop),
1205 GBENU_STATS_P5(ale_rate_limit_drop),
1206 GBENU_STATS_P5(ale_vid_ingress_drop),
1207 GBENU_STATS_P5(ale_da_eq_sa_drop),
1208 GBENU_STATS_P5(ale_unknown_ucast),
1209 GBENU_STATS_P5(ale_unknown_ucast_bytes),
1210 GBENU_STATS_P5(ale_unknown_mcast),
1211 GBENU_STATS_P5(ale_unknown_mcast_bytes),
1212 GBENU_STATS_P5(ale_unknown_bcast),
1213 GBENU_STATS_P5(ale_unknown_bcast_bytes),
1214 GBENU_STATS_P5(tx_mem_protect_err),
1215 /* GBENU Module 6 */
1216 GBENU_STATS_P6(rx_good_frames),
1217 GBENU_STATS_P6(rx_broadcast_frames),
1218 GBENU_STATS_P6(rx_multicast_frames),
1219 GBENU_STATS_P6(rx_pause_frames),
1220 GBENU_STATS_P6(rx_crc_errors),
1221 GBENU_STATS_P6(rx_align_code_errors),
1222 GBENU_STATS_P6(rx_oversized_frames),
1223 GBENU_STATS_P6(rx_jabber_frames),
1224 GBENU_STATS_P6(rx_undersized_frames),
1225 GBENU_STATS_P6(rx_fragments),
1226 GBENU_STATS_P6(ale_drop),
1227 GBENU_STATS_P6(ale_overrun_drop),
1228 GBENU_STATS_P6(rx_bytes),
1229 GBENU_STATS_P6(tx_good_frames),
1230 GBENU_STATS_P6(tx_broadcast_frames),
1231 GBENU_STATS_P6(tx_multicast_frames),
1232 GBENU_STATS_P6(tx_pause_frames),
1233 GBENU_STATS_P6(tx_deferred_frames),
1234 GBENU_STATS_P6(tx_collision_frames),
1235 GBENU_STATS_P6(tx_single_coll_frames),
1236 GBENU_STATS_P6(tx_mult_coll_frames),
1237 GBENU_STATS_P6(tx_excessive_collisions),
1238 GBENU_STATS_P6(tx_late_collisions),
1239 GBENU_STATS_P6(rx_ipg_error),
1240 GBENU_STATS_P6(tx_carrier_sense_errors),
1241 GBENU_STATS_P6(tx_bytes),
1242 GBENU_STATS_P6(tx_64B_frames),
1243 GBENU_STATS_P6(tx_65_to_127B_frames),
1244 GBENU_STATS_P6(tx_128_to_255B_frames),
1245 GBENU_STATS_P6(tx_256_to_511B_frames),
1246 GBENU_STATS_P6(tx_512_to_1023B_frames),
1247 GBENU_STATS_P6(tx_1024B_frames),
1248 GBENU_STATS_P6(net_bytes),
1249 GBENU_STATS_P6(rx_bottom_fifo_drop),
1250 GBENU_STATS_P6(rx_port_mask_drop),
1251 GBENU_STATS_P6(rx_top_fifo_drop),
1252 GBENU_STATS_P6(ale_rate_limit_drop),
1253 GBENU_STATS_P6(ale_vid_ingress_drop),
1254 GBENU_STATS_P6(ale_da_eq_sa_drop),
1255 GBENU_STATS_P6(ale_unknown_ucast),
1256 GBENU_STATS_P6(ale_unknown_ucast_bytes),
1257 GBENU_STATS_P6(ale_unknown_mcast),
1258 GBENU_STATS_P6(ale_unknown_mcast_bytes),
1259 GBENU_STATS_P6(ale_unknown_bcast),
1260 GBENU_STATS_P6(ale_unknown_bcast_bytes),
1261 GBENU_STATS_P6(tx_mem_protect_err),
1262 /* GBENU Module 7 */
1263 GBENU_STATS_P7(rx_good_frames),
1264 GBENU_STATS_P7(rx_broadcast_frames),
1265 GBENU_STATS_P7(rx_multicast_frames),
1266 GBENU_STATS_P7(rx_pause_frames),
1267 GBENU_STATS_P7(rx_crc_errors),
1268 GBENU_STATS_P7(rx_align_code_errors),
1269 GBENU_STATS_P7(rx_oversized_frames),
1270 GBENU_STATS_P7(rx_jabber_frames),
1271 GBENU_STATS_P7(rx_undersized_frames),
1272 GBENU_STATS_P7(rx_fragments),
1273 GBENU_STATS_P7(ale_drop),
1274 GBENU_STATS_P7(ale_overrun_drop),
1275 GBENU_STATS_P7(rx_bytes),
1276 GBENU_STATS_P7(tx_good_frames),
1277 GBENU_STATS_P7(tx_broadcast_frames),
1278 GBENU_STATS_P7(tx_multicast_frames),
1279 GBENU_STATS_P7(tx_pause_frames),
1280 GBENU_STATS_P7(tx_deferred_frames),
1281 GBENU_STATS_P7(tx_collision_frames),
1282 GBENU_STATS_P7(tx_single_coll_frames),
1283 GBENU_STATS_P7(tx_mult_coll_frames),
1284 GBENU_STATS_P7(tx_excessive_collisions),
1285 GBENU_STATS_P7(tx_late_collisions),
1286 GBENU_STATS_P7(rx_ipg_error),
1287 GBENU_STATS_P7(tx_carrier_sense_errors),
1288 GBENU_STATS_P7(tx_bytes),
1289 GBENU_STATS_P7(tx_64B_frames),
1290 GBENU_STATS_P7(tx_65_to_127B_frames),
1291 GBENU_STATS_P7(tx_128_to_255B_frames),
1292 GBENU_STATS_P7(tx_256_to_511B_frames),
1293 GBENU_STATS_P7(tx_512_to_1023B_frames),
1294 GBENU_STATS_P7(tx_1024B_frames),
1295 GBENU_STATS_P7(net_bytes),
1296 GBENU_STATS_P7(rx_bottom_fifo_drop),
1297 GBENU_STATS_P7(rx_port_mask_drop),
1298 GBENU_STATS_P7(rx_top_fifo_drop),
1299 GBENU_STATS_P7(ale_rate_limit_drop),
1300 GBENU_STATS_P7(ale_vid_ingress_drop),
1301 GBENU_STATS_P7(ale_da_eq_sa_drop),
1302 GBENU_STATS_P7(ale_unknown_ucast),
1303 GBENU_STATS_P7(ale_unknown_ucast_bytes),
1304 GBENU_STATS_P7(ale_unknown_mcast),
1305 GBENU_STATS_P7(ale_unknown_mcast_bytes),
1306 GBENU_STATS_P7(ale_unknown_bcast),
1307 GBENU_STATS_P7(ale_unknown_bcast_bytes),
1308 GBENU_STATS_P7(tx_mem_protect_err),
1309 /* GBENU Module 8 */
1310 GBENU_STATS_P8(rx_good_frames),
1311 GBENU_STATS_P8(rx_broadcast_frames),
1312 GBENU_STATS_P8(rx_multicast_frames),
1313 GBENU_STATS_P8(rx_pause_frames),
1314 GBENU_STATS_P8(rx_crc_errors),
1315 GBENU_STATS_P8(rx_align_code_errors),
1316 GBENU_STATS_P8(rx_oversized_frames),
1317 GBENU_STATS_P8(rx_jabber_frames),
1318 GBENU_STATS_P8(rx_undersized_frames),
1319 GBENU_STATS_P8(rx_fragments),
1320 GBENU_STATS_P8(ale_drop),
1321 GBENU_STATS_P8(ale_overrun_drop),
1322 GBENU_STATS_P8(rx_bytes),
1323 GBENU_STATS_P8(tx_good_frames),
1324 GBENU_STATS_P8(tx_broadcast_frames),
1325 GBENU_STATS_P8(tx_multicast_frames),
1326 GBENU_STATS_P8(tx_pause_frames),
1327 GBENU_STATS_P8(tx_deferred_frames),
1328 GBENU_STATS_P8(tx_collision_frames),
1329 GBENU_STATS_P8(tx_single_coll_frames),
1330 GBENU_STATS_P8(tx_mult_coll_frames),
1331 GBENU_STATS_P8(tx_excessive_collisions),
1332 GBENU_STATS_P8(tx_late_collisions),
1333 GBENU_STATS_P8(rx_ipg_error),
1334 GBENU_STATS_P8(tx_carrier_sense_errors),
1335 GBENU_STATS_P8(tx_bytes),
1336 GBENU_STATS_P8(tx_64B_frames),
1337 GBENU_STATS_P8(tx_65_to_127B_frames),
1338 GBENU_STATS_P8(tx_128_to_255B_frames),
1339 GBENU_STATS_P8(tx_256_to_511B_frames),
1340 GBENU_STATS_P8(tx_512_to_1023B_frames),
1341 GBENU_STATS_P8(tx_1024B_frames),
1342 GBENU_STATS_P8(net_bytes),
1343 GBENU_STATS_P8(rx_bottom_fifo_drop),
1344 GBENU_STATS_P8(rx_port_mask_drop),
1345 GBENU_STATS_P8(rx_top_fifo_drop),
1346 GBENU_STATS_P8(ale_rate_limit_drop),
1347 GBENU_STATS_P8(ale_vid_ingress_drop),
1348 GBENU_STATS_P8(ale_da_eq_sa_drop),
1349 GBENU_STATS_P8(ale_unknown_ucast),
1350 GBENU_STATS_P8(ale_unknown_ucast_bytes),
1351 GBENU_STATS_P8(ale_unknown_mcast),
1352 GBENU_STATS_P8(ale_unknown_mcast_bytes),
1353 GBENU_STATS_P8(ale_unknown_bcast),
1354 GBENU_STATS_P8(ale_unknown_bcast_bytes),
1355 GBENU_STATS_P8(tx_mem_protect_err),
1356};
1357
1358#define XGBE_STATS0_INFO(field) \
1359{ \
1360 "GBE_0:"#field, XGBE_STATS0_MODULE, \
1361 FIELD_SIZEOF(struct xgbe_hw_stats, field), \
1362 offsetof(struct xgbe_hw_stats, field) \
1363}
1364
1365#define XGBE_STATS1_INFO(field) \
1366{ \
1367 "GBE_1:"#field, XGBE_STATS1_MODULE, \
1368 FIELD_SIZEOF(struct xgbe_hw_stats, field), \
1369 offsetof(struct xgbe_hw_stats, field) \
1370}
1371
1372#define XGBE_STATS2_INFO(field) \
1373{ \
1374 "GBE_2:"#field, XGBE_STATS2_MODULE, \
1375 FIELD_SIZEOF(struct xgbe_hw_stats, field), \
1376 offsetof(struct xgbe_hw_stats, field) \
1377}
1378
1379static const struct netcp_ethtool_stat xgbe10_et_stats[] = {
1380 /* XGBE module 0 */
1381 XGBE_STATS0_INFO(rx_good_frames),
1382 XGBE_STATS0_INFO(rx_broadcast_frames),
1383 XGBE_STATS0_INFO(rx_multicast_frames),
1384 XGBE_STATS0_INFO(rx_oversized_frames),
1385 XGBE_STATS0_INFO(rx_undersized_frames),
1386 XGBE_STATS0_INFO(overrun_type4),
1387 XGBE_STATS0_INFO(overrun_type5),
1388 XGBE_STATS0_INFO(rx_bytes),
1389 XGBE_STATS0_INFO(tx_good_frames),
1390 XGBE_STATS0_INFO(tx_broadcast_frames),
1391 XGBE_STATS0_INFO(tx_multicast_frames),
1392 XGBE_STATS0_INFO(tx_bytes),
1393 XGBE_STATS0_INFO(tx_64byte_frames),
1394 XGBE_STATS0_INFO(tx_65_to_127byte_frames),
1395 XGBE_STATS0_INFO(tx_128_to_255byte_frames),
1396 XGBE_STATS0_INFO(tx_256_to_511byte_frames),
1397 XGBE_STATS0_INFO(tx_512_to_1023byte_frames),
1398 XGBE_STATS0_INFO(tx_1024byte_frames),
1399 XGBE_STATS0_INFO(net_bytes),
1400 XGBE_STATS0_INFO(rx_sof_overruns),
1401 XGBE_STATS0_INFO(rx_mof_overruns),
1402 XGBE_STATS0_INFO(rx_dma_overruns),
1403 /* XGBE module 1 */
1404 XGBE_STATS1_INFO(rx_good_frames),
1405 XGBE_STATS1_INFO(rx_broadcast_frames),
1406 XGBE_STATS1_INFO(rx_multicast_frames),
1407 XGBE_STATS1_INFO(rx_pause_frames),
1408 XGBE_STATS1_INFO(rx_crc_errors),
1409 XGBE_STATS1_INFO(rx_align_code_errors),
1410 XGBE_STATS1_INFO(rx_oversized_frames),
1411 XGBE_STATS1_INFO(rx_jabber_frames),
1412 XGBE_STATS1_INFO(rx_undersized_frames),
1413 XGBE_STATS1_INFO(rx_fragments),
1414 XGBE_STATS1_INFO(overrun_type4),
1415 XGBE_STATS1_INFO(overrun_type5),
1416 XGBE_STATS1_INFO(rx_bytes),
1417 XGBE_STATS1_INFO(tx_good_frames),
1418 XGBE_STATS1_INFO(tx_broadcast_frames),
1419 XGBE_STATS1_INFO(tx_multicast_frames),
1420 XGBE_STATS1_INFO(tx_pause_frames),
1421 XGBE_STATS1_INFO(tx_deferred_frames),
1422 XGBE_STATS1_INFO(tx_collision_frames),
1423 XGBE_STATS1_INFO(tx_single_coll_frames),
1424 XGBE_STATS1_INFO(tx_mult_coll_frames),
1425 XGBE_STATS1_INFO(tx_excessive_collisions),
1426 XGBE_STATS1_INFO(tx_late_collisions),
1427 XGBE_STATS1_INFO(tx_underrun),
1428 XGBE_STATS1_INFO(tx_carrier_sense_errors),
1429 XGBE_STATS1_INFO(tx_bytes),
1430 XGBE_STATS1_INFO(tx_64byte_frames),
1431 XGBE_STATS1_INFO(tx_65_to_127byte_frames),
1432 XGBE_STATS1_INFO(tx_128_to_255byte_frames),
1433 XGBE_STATS1_INFO(tx_256_to_511byte_frames),
1434 XGBE_STATS1_INFO(tx_512_to_1023byte_frames),
1435 XGBE_STATS1_INFO(tx_1024byte_frames),
1436 XGBE_STATS1_INFO(net_bytes),
1437 XGBE_STATS1_INFO(rx_sof_overruns),
1438 XGBE_STATS1_INFO(rx_mof_overruns),
1439 XGBE_STATS1_INFO(rx_dma_overruns),
1440 /* XGBE module 2 */
1441 XGBE_STATS2_INFO(rx_good_frames),
1442 XGBE_STATS2_INFO(rx_broadcast_frames),
1443 XGBE_STATS2_INFO(rx_multicast_frames),
1444 XGBE_STATS2_INFO(rx_pause_frames),
1445 XGBE_STATS2_INFO(rx_crc_errors),
1446 XGBE_STATS2_INFO(rx_align_code_errors),
1447 XGBE_STATS2_INFO(rx_oversized_frames),
1448 XGBE_STATS2_INFO(rx_jabber_frames),
1449 XGBE_STATS2_INFO(rx_undersized_frames),
1450 XGBE_STATS2_INFO(rx_fragments),
1451 XGBE_STATS2_INFO(overrun_type4),
1452 XGBE_STATS2_INFO(overrun_type5),
1453 XGBE_STATS2_INFO(rx_bytes),
1454 XGBE_STATS2_INFO(tx_good_frames),
1455 XGBE_STATS2_INFO(tx_broadcast_frames),
1456 XGBE_STATS2_INFO(tx_multicast_frames),
1457 XGBE_STATS2_INFO(tx_pause_frames),
1458 XGBE_STATS2_INFO(tx_deferred_frames),
1459 XGBE_STATS2_INFO(tx_collision_frames),
1460 XGBE_STATS2_INFO(tx_single_coll_frames),
1461 XGBE_STATS2_INFO(tx_mult_coll_frames),
1462 XGBE_STATS2_INFO(tx_excessive_collisions),
1463 XGBE_STATS2_INFO(tx_late_collisions),
1464 XGBE_STATS2_INFO(tx_underrun),
1465 XGBE_STATS2_INFO(tx_carrier_sense_errors),
1466 XGBE_STATS2_INFO(tx_bytes),
1467 XGBE_STATS2_INFO(tx_64byte_frames),
1468 XGBE_STATS2_INFO(tx_65_to_127byte_frames),
1469 XGBE_STATS2_INFO(tx_128_to_255byte_frames),
1470 XGBE_STATS2_INFO(tx_256_to_511byte_frames),
1471 XGBE_STATS2_INFO(tx_512_to_1023byte_frames),
1472 XGBE_STATS2_INFO(tx_1024byte_frames),
1473 XGBE_STATS2_INFO(net_bytes),
1474 XGBE_STATS2_INFO(rx_sof_overruns),
1475 XGBE_STATS2_INFO(rx_mof_overruns),
1476 XGBE_STATS2_INFO(rx_dma_overruns),
1477};
1478
1479#define for_each_intf(i, priv) \
1480 list_for_each_entry((i), &(priv)->gbe_intf_head, gbe_intf_list)
1481
1482#define for_each_sec_slave(slave, priv) \
1483 list_for_each_entry((slave), &(priv)->secondary_slaves, slave_list)
1484
1485#define first_sec_slave(priv) \
1486 list_first_entry(&priv->secondary_slaves, \
1487 struct gbe_slave, slave_list)
1488
1489static void keystone_get_drvinfo(struct net_device *ndev,
1490 struct ethtool_drvinfo *info)
1491{
1492 strncpy(info->driver, NETCP_DRIVER_NAME, sizeof(info->driver));
1493 strncpy(info->version, NETCP_DRIVER_VERSION, sizeof(info->version));
1494}
1495
1496static u32 keystone_get_msglevel(struct net_device *ndev)
1497{
1498 struct netcp_intf *netcp = netdev_priv(ndev);
1499
1500 return netcp->msg_enable;
1501}
1502
1503static void keystone_set_msglevel(struct net_device *ndev, u32 value)
1504{
1505 struct netcp_intf *netcp = netdev_priv(ndev);
1506
1507 netcp->msg_enable = value;
1508}
1509
1510static void keystone_get_stat_strings(struct net_device *ndev,
1511 uint32_t stringset, uint8_t *data)
1512{
1513 struct netcp_intf *netcp = netdev_priv(ndev);
1514 struct gbe_intf *gbe_intf;
1515 struct gbe_priv *gbe_dev;
1516 int i;
1517
1518 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1519 if (!gbe_intf)
1520 return;
1521 gbe_dev = gbe_intf->gbe_dev;
1522
1523 switch (stringset) {
1524 case ETH_SS_STATS:
1525 for (i = 0; i < gbe_dev->num_et_stats; i++) {
1526 memcpy(data, gbe_dev->et_stats[i].desc,
1527 ETH_GSTRING_LEN);
1528 data += ETH_GSTRING_LEN;
1529 }
1530 break;
1531 case ETH_SS_TEST:
1532 break;
1533 }
1534}
1535
1536static int keystone_get_sset_count(struct net_device *ndev, int stringset)
1537{
1538 struct netcp_intf *netcp = netdev_priv(ndev);
1539 struct gbe_intf *gbe_intf;
1540 struct gbe_priv *gbe_dev;
1541
1542 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1543 if (!gbe_intf)
1544 return -EINVAL;
1545 gbe_dev = gbe_intf->gbe_dev;
1546
1547 switch (stringset) {
1548 case ETH_SS_TEST:
1549 return 0;
1550 case ETH_SS_STATS:
1551 return gbe_dev->num_et_stats;
1552 default:
1553 return -EINVAL;
1554 }
1555}
1556
1557static void gbe_update_stats(struct gbe_priv *gbe_dev, uint64_t *data)
1558{
1559 void __iomem *base = NULL;
1560 u32 __iomem *p;
1561 u32 tmp = 0;
1562 int i;
1563
1564 for (i = 0; i < gbe_dev->num_et_stats; i++) {
1565 base = gbe_dev->hw_stats_regs[gbe_dev->et_stats[i].type];
1566 p = base + gbe_dev->et_stats[i].offset;
1567 tmp = readl(p);
1568 gbe_dev->hw_stats[i] = gbe_dev->hw_stats[i] + tmp;
1569 if (data)
1570 data[i] = gbe_dev->hw_stats[i];
1571 /* write-to-decrement:
1572 * new register value = old register value - write value
1573 */
1574 writel(tmp, p);
1575 }
1576}
1577
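/* On version 1.4 hardware only two of the four stats modules are visible
 * at a time; GBE_STATS_CD_SEL in stat_port_en selects whether the A/B or
 * the C/D bank is mapped, so the counters are harvested in two passes.
 */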
1578static void gbe_update_stats_ver14(struct gbe_priv *gbe_dev, uint64_t *data)
1579{
1580 void __iomem *gbe_statsa = gbe_dev->hw_stats_regs[0];
1581 void __iomem *gbe_statsb = gbe_dev->hw_stats_regs[1];
1582 u64 *hw_stats = &gbe_dev->hw_stats[0];
1583 void __iomem *base = NULL;
1584 u32 __iomem *p;
1585 u32 tmp = 0, val, pair_size = (gbe_dev->num_et_stats / 2);
1586 int i, j, pair;
1587
1588 for (pair = 0; pair < 2; pair++) {
1589 val = readl(GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1590
1591 if (pair == 0)
1592 val &= ~GBE_STATS_CD_SEL;
1593 else
1594 val |= GBE_STATS_CD_SEL;
1595
1596 /* make the stat modules visible */
1597 writel(val, GBE_REG_ADDR(gbe_dev, switch_regs, stat_port_en));
1598
1599 for (i = 0; i < pair_size; i++) {
1600 j = pair * pair_size + i;
1601 switch (gbe_dev->et_stats[j].type) {
1602 case GBE_STATSA_MODULE:
1603 case GBE_STATSC_MODULE:
1604 base = gbe_statsa;
1605 break;
1606 case GBE_STATSB_MODULE:
1607 case GBE_STATSD_MODULE:
1608 base = gbe_statsb;
1609 break;
1610 }
1611
1612 p = base + gbe_dev->et_stats[j].offset;
1613 tmp = readl(p);
1614 hw_stats[j] += tmp;
1615 if (data)
1616 data[j] = hw_stats[j];
1617 /* write-to-decrement:
1618 * new register value = old register value - write value
1619 */
1620 writel(tmp, p);
1621 }
1622 }
1623}
1624
1625static void keystone_get_ethtool_stats(struct net_device *ndev,
1626 struct ethtool_stats *stats,
1627 uint64_t *data)
1628{
1629 struct netcp_intf *netcp = netdev_priv(ndev);
1630 struct gbe_intf *gbe_intf;
1631 struct gbe_priv *gbe_dev;
1632
1633 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1634 if (!gbe_intf)
1635 return;
1636
1637 gbe_dev = gbe_intf->gbe_dev;
1638 spin_lock_bh(&gbe_dev->hw_stats_lock);
1639 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1640 gbe_update_stats_ver14(gbe_dev, data);
1641 else
1642 gbe_update_stats(gbe_dev, data);
1643 spin_unlock_bh(&gbe_dev->hw_stats_lock);
1644}
1645
1646static int keystone_get_settings(struct net_device *ndev,
1647 struct ethtool_cmd *cmd)
1648{
1649 struct netcp_intf *netcp = netdev_priv(ndev);
1650 struct phy_device *phy = ndev->phydev;
1651 struct gbe_intf *gbe_intf;
1652 int ret;
1653
1654 if (!phy)
1655 return -EINVAL;
1656
1657 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1658 if (!gbe_intf)
1659 return -EINVAL;
1660
1661 if (!gbe_intf->slave)
1662 return -EINVAL;
1663
1664 ret = phy_ethtool_gset(phy, cmd);
1665 if (!ret)
1666 cmd->port = gbe_intf->slave->phy_port_t;
1667
1668 return ret;
1669}
1670
1671static int keystone_set_settings(struct net_device *ndev,
1672 struct ethtool_cmd *cmd)
1673{
1674 struct netcp_intf *netcp = netdev_priv(ndev);
1675 struct phy_device *phy = ndev->phydev;
1676 struct gbe_intf *gbe_intf;
1677 u32 features = cmd->advertising & cmd->supported;
1678
1679 if (!phy)
1680 return -EINVAL;
1681
1682 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1683 if (!gbe_intf)
1684 return -EINVAL;
1685
1686 if (!gbe_intf->slave)
1687 return -EINVAL;
1688
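	/* Only allow changing the reported port type if the PHY actually
	 * advertises that medium.
	 */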
1689 if (cmd->port != gbe_intf->slave->phy_port_t) {
1690 if ((cmd->port == PORT_TP) && !(features & ADVERTISED_TP))
1691 return -EINVAL;
1692
1693 if ((cmd->port == PORT_AUI) && !(features & ADVERTISED_AUI))
1694 return -EINVAL;
1695
1696 if ((cmd->port == PORT_BNC) && !(features & ADVERTISED_BNC))
1697 return -EINVAL;
1698
1699 if ((cmd->port == PORT_MII) && !(features & ADVERTISED_MII))
1700 return -EINVAL;
1701
1702 if ((cmd->port == PORT_FIBRE) && !(features & ADVERTISED_FIBRE))
1703 return -EINVAL;
1704 }
1705
1706 gbe_intf->slave->phy_port_t = cmd->port;
1707 return phy_ethtool_sset(phy, cmd);
1708}
1709
1710static const struct ethtool_ops keystone_ethtool_ops = {
1711 .get_drvinfo = keystone_get_drvinfo,
1712 .get_link = ethtool_op_get_link,
1713 .get_msglevel = keystone_get_msglevel,
1714 .set_msglevel = keystone_set_msglevel,
1715 .get_strings = keystone_get_stat_strings,
1716 .get_sset_count = keystone_get_sset_count,
1717 .get_ethtool_stats = keystone_get_ethtool_stats,
1718 .get_settings = keystone_get_settings,
1719 .set_settings = keystone_set_settings,
1720};
1721
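/* Pack a 6-byte MAC address into the sa_hi/sa_lo register layout used by
 * the slave ports, e.g. 00:01:02:03:04:05 -> sa_hi = 0x03020100,
 * sa_lo = 0x0504.
 */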
1722#define mac_hi(mac) (((mac)[0] << 0) | ((mac)[1] << 8) | \
1723 ((mac)[2] << 16) | ((mac)[3] << 24))
1724#define mac_lo(mac) (((mac)[4] << 0) | ((mac)[5] << 8))
1725
1726static void gbe_set_slave_mac(struct gbe_slave *slave,
1727 struct gbe_intf *gbe_intf)
1728{
1729 struct net_device *ndev = gbe_intf->ndev;
1730
1731 writel(mac_hi(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_hi));
1732 writel(mac_lo(ndev->dev_addr), GBE_REG_ADDR(slave, port_regs, sa_lo));
1733}
1734
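/* Map a slave index to its switch port number; when the host occupies
 * port 0 the slave ports start at 1.
 */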
1735static int gbe_get_slave_port(struct gbe_priv *priv, u32 slave_num)
1736{
1737 if (priv->host_port == 0)
1738 return slave_num + 1;
1739
1740 return slave_num;
1741}
1742
1743static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
1744 struct net_device *ndev,
1745 struct gbe_slave *slave,
1746 int up)
1747{
1748 struct phy_device *phy = slave->phy;
1749 u32 mac_control = 0;
1750
1751 if (up) {
1752 mac_control = slave->mac_control;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001753 if (phy && (phy->speed == SPEED_1000)) {
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001754 mac_control |= MACSL_GIG_MODE;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001755 mac_control &= ~MACSL_XGIG_MODE;
1756 } else if (phy && (phy->speed == SPEED_10000)) {
1757 mac_control |= MACSL_XGIG_MODE;
1758 mac_control &= ~MACSL_GIG_MODE;
1759 }
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001760
1761 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1762 mac_control));
1763
1764 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1765 ALE_PORT_STATE,
1766 ALE_PORT_STATE_FORWARD);
1767
Karicheri, Muralidharan8e046d62015-04-27 14:12:43 -04001768 if (ndev && slave->open &&
1769 slave->link_interface != SGMII_LINK_MAC_PHY &&
1770 slave->link_interface != XGMII_LINK_MAC_PHY)
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001771 netif_carrier_on(ndev);
1772 } else {
1773 writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
1774 mac_control));
1775 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1776 ALE_PORT_STATE,
1777 ALE_PORT_STATE_DISABLE);
Karicheri, Muralidharan8e046d62015-04-27 14:12:43 -04001778 if (ndev &&
1779 slave->link_interface != SGMII_LINK_MAC_PHY &&
1780 slave->link_interface != XGMII_LINK_MAC_PHY)
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001781 netif_carrier_off(ndev);
1782 }
1783
1784 if (phy)
1785 phy_print_status(phy);
1786}
1787
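/* Slaves without an attached PHY (MAC-to-MAC links) are treated as
 * always having PHY link up.
 */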
1788static bool gbe_phy_link_status(struct gbe_slave *slave)
1789{
1790 return !slave->phy || slave->phy->link;
1791}
1792
1793static void netcp_ethss_update_link_state(struct gbe_priv *gbe_dev,
1794 struct gbe_slave *slave,
1795 struct net_device *ndev)
1796{
1797 int sp = slave->slave_num;
1798 int phy_link_state, sgmii_link_state = 1, link_state;
1799
1800 if (!slave->open)
1801 return;
1802
WingMan Kwok9a391c72015-03-20 16:11:25 -04001803 if (!SLAVE_LINK_IS_XGMII(slave)) {
1804 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
1805 sgmii_link_state =
1806 netcp_sgmii_get_port_link(SGMII_BASE(sp), sp);
1807 else
1808 sgmii_link_state =
1809 netcp_sgmii_get_port_link(
1810 gbe_dev->sgmii_port_regs, sp);
1811 }
1812
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001813 phy_link_state = gbe_phy_link_status(slave);
1814 link_state = phy_link_state & sgmii_link_state;
1815
1816 if (atomic_xchg(&slave->link_state, link_state) != link_state)
1817 netcp_ethss_link_state_action(gbe_dev, ndev, slave,
1818 link_state);
1819}
1820
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001821static void xgbe_adjust_link(struct net_device *ndev)
1822{
1823 struct netcp_intf *netcp = netdev_priv(ndev);
1824 struct gbe_intf *gbe_intf;
1825
1826 gbe_intf = netcp_module_get_intf_data(&xgbe_module, netcp);
1827 if (!gbe_intf)
1828 return;
1829
1830 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1831 ndev);
1832}
1833
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001834static void gbe_adjust_link(struct net_device *ndev)
1835{
1836 struct netcp_intf *netcp = netdev_priv(ndev);
1837 struct gbe_intf *gbe_intf;
1838
1839 gbe_intf = netcp_module_get_intf_data(&gbe_module, netcp);
1840 if (!gbe_intf)
1841 return;
1842
1843 netcp_ethss_update_link_state(gbe_intf->gbe_dev, gbe_intf->slave,
1844 ndev);
1845}
1846
1847static void gbe_adjust_link_sec_slaves(struct net_device *ndev)
1848{
1849 struct gbe_priv *gbe_dev = netdev_priv(ndev);
1850 struct gbe_slave *slave;
1851
1852 for_each_sec_slave(slave, gbe_dev)
1853 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
1854}
1855
1856/* Reset EMAC
1857 * Soft reset is set and polled until clear, or until a timeout occurs
1858 */
1859static int gbe_port_reset(struct gbe_slave *slave)
1860{
1861 u32 i, v;
1862
1863 /* Set the soft reset bit */
1864 writel(SOFT_RESET, GBE_REG_ADDR(slave, emac_regs, soft_reset));
1865
1866 /* Wait for the bit to clear */
1867 for (i = 0; i < DEVICE_EMACSL_RESET_POLL_COUNT; i++) {
1868 v = readl(GBE_REG_ADDR(slave, emac_regs, soft_reset));
1869 if ((v & SOFT_RESET_MASK) != SOFT_RESET)
1870 return 0;
1871 }
1872
1873 /* Timeout on the reset */
1874 return GMACSL_RET_WARN_RESET_INCOMPLETE;
1875}
1876
1877/* Configure EMAC */
1878static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1879 int max_rx_len)
1880{
WingMan Kwok9a391c72015-03-20 16:11:25 -04001881 void __iomem *rx_maxlen_reg;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001882 u32 xgmii_mode;
1883
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001884 if (max_rx_len > NETCP_MAX_FRAME_SIZE)
1885 max_rx_len = NETCP_MAX_FRAME_SIZE;
1886
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001887 /* Enable correct MII mode at SS level */
1888 if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) &&
1889 (slave->link_interface >= XGMII_LINK_MAC_PHY)) {
1890 xgmii_mode = readl(GBE_REG_ADDR(gbe_dev, ss_regs, control));
1891 xgmii_mode |= (1 << slave->slave_num);
1892 writel(xgmii_mode, GBE_REG_ADDR(gbe_dev, ss_regs, control));
1893 }
1894
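	/* On NU/2U subsystems the RX max-length register lives in the
	 * per-port register block rather than in the EMAC (MAC-SL) block.
	 */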
WingMan Kwok9a391c72015-03-20 16:11:25 -04001895 if (IS_SS_ID_MU(gbe_dev))
1896 rx_maxlen_reg = GBE_REG_ADDR(slave, port_regs, rx_maxlen);
1897 else
1898 rx_maxlen_reg = GBE_REG_ADDR(slave, emac_regs, rx_maxlen);
1899
1900 writel(max_rx_len, rx_maxlen_reg);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001901 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
1902}
1903
1904static void gbe_slave_stop(struct gbe_intf *intf)
1905{
1906 struct gbe_priv *gbe_dev = intf->gbe_dev;
1907 struct gbe_slave *slave = intf->slave;
1908
1909 gbe_port_reset(slave);
1910 /* Disable forwarding */
1911 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
1912 ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
1913 cpsw_ale_del_mcast(gbe_dev->ale, intf->ndev->broadcast,
1914 1 << slave->port_num, 0, 0);
1915
1916 if (!slave->phy)
1917 return;
1918
1919 phy_stop(slave->phy);
1920 phy_disconnect(slave->phy);
1921 slave->phy = NULL;
1922}
1923
1924static void gbe_sgmii_config(struct gbe_priv *priv, struct gbe_slave *slave)
1925{
1926 void __iomem *sgmii_port_regs;
1927
1928 sgmii_port_regs = priv->sgmii_port_regs;
1929 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1930 sgmii_port_regs = priv->sgmii_port34_regs;
1931
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001932 if (!SLAVE_LINK_IS_XGMII(slave)) {
1933 netcp_sgmii_reset(sgmii_port_regs, slave->slave_num);
1934 netcp_sgmii_config(sgmii_port_regs, slave->slave_num,
1935 slave->link_interface);
1936 }
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001937}
1938
1939static int gbe_slave_open(struct gbe_intf *gbe_intf)
1940{
1941 struct gbe_priv *priv = gbe_intf->gbe_dev;
1942 struct gbe_slave *slave = gbe_intf->slave;
1943 phy_interface_t phy_mode;
1944 bool has_phy = false;
1945
1946 void (*hndlr)(struct net_device *) = gbe_adjust_link;
1947
1948 gbe_sgmii_config(priv, slave);
1949 gbe_port_reset(slave);
1950 gbe_port_config(priv, slave, priv->rx_packet_max);
1951 gbe_set_slave_mac(slave, gbe_intf);
1952 /* enable forwarding */
1953 cpsw_ale_control_set(priv->ale, slave->port_num,
1954 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
1955 cpsw_ale_add_mcast(priv->ale, gbe_intf->ndev->broadcast,
1956 1 << slave->port_num, 0, 0, ALE_MCAST_FWD_2);
1957
1958 if (slave->link_interface == SGMII_LINK_MAC_PHY) {
1959 has_phy = true;
1960 phy_mode = PHY_INTERFACE_MODE_SGMII;
1961 slave->phy_port_t = PORT_MII;
1962 } else if (slave->link_interface == XGMII_LINK_MAC_PHY) {
1963 has_phy = true;
1964 phy_mode = PHY_INTERFACE_MODE_NA;
1965 slave->phy_port_t = PORT_FIBRE;
1966 }
1967
1968 if (has_phy) {
Wingman Kwok90cff9e2015-01-15 19:12:52 -05001969 if (priv->ss_version == XGBE_SS_VERSION_10)
1970 hndlr = xgbe_adjust_link;
1971
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001972 slave->phy = of_phy_connect(gbe_intf->ndev,
1973 slave->phy_node,
1974 hndlr, 0,
1975 phy_mode);
1976 if (!slave->phy) {
1977 dev_err(priv->dev, "phy not found on slave %d\n",
1978 slave->slave_num);
1979 return -ENODEV;
1980 }
1981 dev_dbg(priv->dev, "phy found: id is: 0x%s\n",
1982 dev_name(&slave->phy->dev));
1983 phy_start(slave->phy);
1984 phy_read_status(slave->phy);
1985 }
1986 return 0;
1987}
1988
1989static void gbe_init_host_port(struct gbe_priv *priv)
1990{
1991 int bypass_en = 1;
WingMan Kwok9a391c72015-03-20 16:11:25 -04001992
1993 /* Host Tx Pri */
1994 if (IS_SS_ID_NU(priv))
1995 writel(HOST_TX_PRI_MAP_DEFAULT,
1996 GBE_REG_ADDR(priv, host_port_regs, tx_pri_map));
1997
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05001998 /* Max length register */
1999 writel(NETCP_MAX_FRAME_SIZE, GBE_REG_ADDR(priv, host_port_regs,
2000 rx_maxlen));
2001
2002 cpsw_ale_start(priv->ale);
2003
2004 if (priv->enable_ale)
2005 bypass_en = 0;
2006
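	/* The "enable-ale" DT property selects full ALE switching; without
	 * it the ALE is put into bypass mode and the driver directs egress
	 * traffic to a specific slave port itself.
	 */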
2007 cpsw_ale_control_set(priv->ale, 0, ALE_BYPASS, bypass_en);
2008
2009 cpsw_ale_control_set(priv->ale, 0, ALE_NO_PORT_VLAN, 1);
2010
2011 cpsw_ale_control_set(priv->ale, priv->host_port,
2012 ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
2013
2014 cpsw_ale_control_set(priv->ale, 0,
2015 ALE_PORT_UNKNOWN_VLAN_MEMBER,
2016 GBE_PORT_MASK(priv->ale_ports));
2017
2018 cpsw_ale_control_set(priv->ale, 0,
2019 ALE_PORT_UNKNOWN_MCAST_FLOOD,
2020 GBE_PORT_MASK(priv->ale_ports - 1));
2021
2022 cpsw_ale_control_set(priv->ale, 0,
2023 ALE_PORT_UNKNOWN_REG_MCAST_FLOOD,
2024 GBE_PORT_MASK(priv->ale_ports));
2025
2026 cpsw_ale_control_set(priv->ale, 0,
2027 ALE_PORT_UNTAGGED_EGRESS,
2028 GBE_PORT_MASK(priv->ale_ports));
2029}
2030
2031static void gbe_add_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2032{
2033 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2034 u16 vlan_id;
2035
2036 cpsw_ale_add_mcast(gbe_dev->ale, addr,
2037 GBE_PORT_MASK(gbe_dev->ale_ports), 0, 0,
2038 ALE_MCAST_FWD_2);
2039 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2040 cpsw_ale_add_mcast(gbe_dev->ale, addr,
2041 GBE_PORT_MASK(gbe_dev->ale_ports),
2042 ALE_VLAN, vlan_id, ALE_MCAST_FWD_2);
2043 }
2044}
2045
2046static void gbe_add_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2047{
2048 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2049 u16 vlan_id;
2050
2051 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2052
2053 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID)
2054 cpsw_ale_add_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2055 ALE_VLAN, vlan_id);
2056}
2057
2058static void gbe_del_mcast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2059{
2060 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2061 u16 vlan_id;
2062
2063 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, 0, 0);
2064
2065 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2066 cpsw_ale_del_mcast(gbe_dev->ale, addr, 0, ALE_VLAN, vlan_id);
2067 }
2068}
2069
2070static void gbe_del_ucast_addr(struct gbe_intf *gbe_intf, u8 *addr)
2071{
2072 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2073 u16 vlan_id;
2074
2075 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port, 0, 0);
2076
2077 for_each_set_bit(vlan_id, gbe_intf->active_vlans, VLAN_N_VID) {
2078 cpsw_ale_del_ucast(gbe_dev->ale, addr, gbe_dev->host_port,
2079 ALE_VLAN, vlan_id);
2080 }
2081}
2082
2083static int gbe_add_addr(void *intf_priv, struct netcp_addr *naddr)
2084{
2085 struct gbe_intf *gbe_intf = intf_priv;
2086 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2087
2088 dev_dbg(gbe_dev->dev, "ethss adding address %pM, type %d\n",
2089 naddr->addr, naddr->type);
2090
2091 switch (naddr->type) {
2092 case ADDR_MCAST:
2093 case ADDR_BCAST:
2094 gbe_add_mcast_addr(gbe_intf, naddr->addr);
2095 break;
2096 case ADDR_UCAST:
2097 case ADDR_DEV:
2098 gbe_add_ucast_addr(gbe_intf, naddr->addr);
2099 break;
2100 case ADDR_ANY:
2101 /* nothing to do for promiscuous */
2102 default:
2103 break;
2104 }
2105
2106 return 0;
2107}
2108
2109static int gbe_del_addr(void *intf_priv, struct netcp_addr *naddr)
2110{
2111 struct gbe_intf *gbe_intf = intf_priv;
2112 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2113
2114 dev_dbg(gbe_dev->dev, "ethss deleting address %pM, type %d\n",
2115 naddr->addr, naddr->type);
2116
2117 switch (naddr->type) {
2118 case ADDR_MCAST:
2119 case ADDR_BCAST:
2120 gbe_del_mcast_addr(gbe_intf, naddr->addr);
2121 break;
2122 case ADDR_UCAST:
2123 case ADDR_DEV:
2124 gbe_del_ucast_addr(gbe_intf, naddr->addr);
2125 break;
2126 case ADDR_ANY:
2127 /* nothing to do for promiscuous */
2128 default:
2129 break;
2130 }
2131
2132 return 0;
2133}
2134
2135static int gbe_add_vid(void *intf_priv, int vid)
2136{
2137 struct gbe_intf *gbe_intf = intf_priv;
2138 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2139
2140 set_bit(vid, gbe_intf->active_vlans);
2141
2142 cpsw_ale_add_vlan(gbe_dev->ale, vid,
2143 GBE_PORT_MASK(gbe_dev->ale_ports),
2144 GBE_MASK_NO_PORTS,
2145 GBE_PORT_MASK(gbe_dev->ale_ports),
2146 GBE_PORT_MASK(gbe_dev->ale_ports - 1));
2147
2148 return 0;
2149}
2150
2151static int gbe_del_vid(void *intf_priv, int vid)
2152{
2153 struct gbe_intf *gbe_intf = intf_priv;
2154 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2155
2156 cpsw_ale_del_vlan(gbe_dev->ale, vid, 0);
2157 clear_bit(vid, gbe_intf->active_vlans);
2158 return 0;
2159}
2160
2161static int gbe_ioctl(void *intf_priv, struct ifreq *req, int cmd)
2162{
2163 struct gbe_intf *gbe_intf = intf_priv;
2164 struct phy_device *phy = gbe_intf->slave->phy;
2165 int ret = -EOPNOTSUPP;
2166
2167 if (phy)
2168 ret = phy_mii_ioctl(phy, req, cmd);
2169
2170 return ret;
2171}
2172
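/* Periodic housekeeping: poll the SGMII/PHY link state of all open
 * interfaces and secondary slaves, and fold the hardware stats into the
 * 64-bit software counters, which also keeps the 32-bit hardware
 * counters from overflowing between reads.
 */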
2173static void netcp_ethss_timer(unsigned long arg)
2174{
2175 struct gbe_priv *gbe_dev = (struct gbe_priv *)arg;
2176 struct gbe_intf *gbe_intf;
2177 struct gbe_slave *slave;
2178
2179 /* Check & update SGMII link state of interfaces */
2180 for_each_intf(gbe_intf, gbe_dev) {
2181 if (!gbe_intf->slave->open)
2182 continue;
2183 netcp_ethss_update_link_state(gbe_dev, gbe_intf->slave,
2184 gbe_intf->ndev);
2185 }
2186
2187 /* Check & update SGMII link state of secondary ports */
2188 for_each_sec_slave(slave, gbe_dev) {
2189 netcp_ethss_update_link_state(gbe_dev, slave, NULL);
2190 }
2191
WingMan Kwokc0f54ed2015-07-23 15:57:19 -04002192 /* A timer runs as a BH, no need to block them */
2193 spin_lock(&gbe_dev->hw_stats_lock);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002194
2195 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
2196 gbe_update_stats_ver14(gbe_dev, NULL);
2197 else
2198 gbe_update_stats(gbe_dev, NULL);
2199
WingMan Kwokc0f54ed2015-07-23 15:57:19 -04002200 spin_unlock(&gbe_dev->hw_stats_lock);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002201
2202 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
2203 add_timer(&gbe_dev->timer);
2204}
2205
2206static int gbe_tx_hook(int order, void *data, struct netcp_packet *p_info)
2207{
2208 struct gbe_intf *gbe_intf = data;
2209
2210 p_info->tx_pipe = &gbe_intf->tx_pipe;
2211 return 0;
2212}
2213
2214static int gbe_open(void *intf_priv, struct net_device *ndev)
2215{
2216 struct gbe_intf *gbe_intf = intf_priv;
2217 struct gbe_priv *gbe_dev = gbe_intf->gbe_dev;
2218 struct netcp_intf *netcp = netdev_priv(ndev);
2219 struct gbe_slave *slave = gbe_intf->slave;
2220 int port_num = slave->port_num;
2221 u32 reg;
2222 int ret;
2223
2224 reg = readl(GBE_REG_ADDR(gbe_dev, switch_regs, id_ver));
2225 dev_dbg(gbe_dev->dev, "initializing gbe version %d.%d (%d) GBE identification value 0x%x\n",
2226 GBE_MAJOR_VERSION(reg), GBE_MINOR_VERSION(reg),
2227 GBE_RTL_VERSION(reg), GBE_IDENT(reg));
2228
WingMan Kwok9a391c72015-03-20 16:11:25 -04002229 /* For 10G and on NetCP 1.5, use directed to port */
2230 if ((gbe_dev->ss_version == XGBE_SS_VERSION_10) || IS_SS_ID_MU(gbe_dev))
Karicheri, Muralidharane170f402015-03-20 16:11:21 -04002231 gbe_intf->tx_pipe.flags = SWITCH_TO_PORT_IN_TAGINFO;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002232
Karicheri, Muralidharane170f402015-03-20 16:11:21 -04002233 if (gbe_dev->enable_ale)
2234 gbe_intf->tx_pipe.switch_to_port = 0;
2235 else
2236 gbe_intf->tx_pipe.switch_to_port = port_num;
2237
2238 dev_dbg(gbe_dev->dev,
2239 "opened TX channel %s: %p with to port %d, flags %d\n",
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002240 gbe_intf->tx_pipe.dma_chan_name,
2241 gbe_intf->tx_pipe.dma_channel,
Karicheri, Muralidharane170f402015-03-20 16:11:21 -04002242 gbe_intf->tx_pipe.switch_to_port,
2243 gbe_intf->tx_pipe.flags);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002244
2245 gbe_slave_stop(gbe_intf);
2246
 2247 /* disable priority elevation on all ports */
2248 writel(0, GBE_REG_ADDR(gbe_dev, switch_regs, ptype));
2249
2250 /* Control register */
2251 writel(GBE_CTL_P0_ENABLE, GBE_REG_ADDR(gbe_dev, switch_regs, control));
2252
2253 /* All statistics enabled and STAT AB visible by default */
WingMan Kwok9a391c72015-03-20 16:11:25 -04002254 writel(gbe_dev->stats_en_mask, GBE_REG_ADDR(gbe_dev, switch_regs,
2255 stat_port_en));
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002256
2257 ret = gbe_slave_open(gbe_intf);
2258 if (ret)
2259 goto fail;
2260
2261 netcp_register_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
2262 gbe_intf);
2263
2264 slave->open = true;
2265 netcp_ethss_update_link_state(gbe_dev, slave, ndev);
2266 return 0;
2267
2268fail:
2269 gbe_slave_stop(gbe_intf);
2270 return ret;
2271}
2272
2273static int gbe_close(void *intf_priv, struct net_device *ndev)
2274{
2275 struct gbe_intf *gbe_intf = intf_priv;
2276 struct netcp_intf *netcp = netdev_priv(ndev);
2277
2278 gbe_slave_stop(gbe_intf);
2279 netcp_unregister_txhook(netcp, GBE_TXHOOK_ORDER, gbe_tx_hook,
2280 gbe_intf);
2281
2282 gbe_intf->slave->open = false;
2283 atomic_set(&gbe_intf->slave->link_state, NETCP_LINK_STATE_INVALID);
2284 return 0;
2285}
2286
2287static int init_slave(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
2288 struct device_node *node)
2289{
2290 int port_reg_num;
2291 u32 port_reg_ofs, emac_reg_ofs;
WingMan Kwok9a391c72015-03-20 16:11:25 -04002292 u32 port_reg_blk_sz, emac_reg_blk_sz;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002293
2294 if (of_property_read_u32(node, "slave-port", &slave->slave_num)) {
2295 dev_err(gbe_dev->dev, "missing slave-port parameter\n");
2296 return -EINVAL;
2297 }
2298
2299 if (of_property_read_u32(node, "link-interface",
2300 &slave->link_interface)) {
2301 dev_warn(gbe_dev->dev,
2302 "missing link-interface value defaulting to 1G mac-phy link\n");
 2303 "missing link-interface value, defaulting to 1G mac-phy link\n");
2304 }
2305
2306 slave->open = false;
2307 slave->phy_node = of_parse_phandle(node, "phy-handle", 0);
2308 slave->port_num = gbe_get_slave_port(gbe_dev, slave->slave_num);
2309
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002310 if (slave->link_interface >= XGMII_LINK_MAC_PHY)
2311 slave->mac_control = GBE_DEF_10G_MAC_CONTROL;
2312 else
2313 slave->mac_control = GBE_DEF_1G_MAC_CONTROL;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002314
 2315 /* EMAC register blocks are contiguous, but the slave port register blocks are not */
2316 port_reg_num = slave->slave_num;
2317 if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2318 if (slave->slave_num > 1) {
2319 port_reg_ofs = GBE13_SLAVE_PORT2_OFFSET;
2320 port_reg_num -= 2;
2321 } else {
2322 port_reg_ofs = GBE13_SLAVE_PORT_OFFSET;
2323 }
WingMan Kwok9a391c72015-03-20 16:11:25 -04002324 emac_reg_ofs = GBE13_EMAC_OFFSET;
2325 port_reg_blk_sz = 0x30;
2326 emac_reg_blk_sz = 0x40;
2327 } else if (IS_SS_ID_MU(gbe_dev)) {
2328 port_reg_ofs = GBENU_SLAVE_PORT_OFFSET;
2329 emac_reg_ofs = GBENU_EMAC_OFFSET;
2330 port_reg_blk_sz = 0x1000;
2331 emac_reg_blk_sz = 0x1000;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002332 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2333 port_reg_ofs = XGBE10_SLAVE_PORT_OFFSET;
WingMan Kwok9a391c72015-03-20 16:11:25 -04002334 emac_reg_ofs = XGBE10_EMAC_OFFSET;
2335 port_reg_blk_sz = 0x30;
2336 emac_reg_blk_sz = 0x40;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002337 } else {
2338 dev_err(gbe_dev->dev, "unknown ethss(0x%x)\n",
2339 gbe_dev->ss_version);
2340 return -EINVAL;
2341 }
2342
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002343 slave->port_regs = gbe_dev->switch_regs + port_reg_ofs +
WingMan Kwok9a391c72015-03-20 16:11:25 -04002344 (port_reg_blk_sz * port_reg_num);
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002345 slave->emac_regs = gbe_dev->switch_regs + emac_reg_ofs +
WingMan Kwok9a391c72015-03-20 16:11:25 -04002346 (emac_reg_blk_sz * slave->slave_num);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002347
2348 if (gbe_dev->ss_version == GBE_SS_VERSION_14) {
2349 /* Initialize slave port register offsets */
2350 GBE_SET_REG_OFS(slave, port_regs, port_vlan);
2351 GBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2352 GBE_SET_REG_OFS(slave, port_regs, sa_lo);
2353 GBE_SET_REG_OFS(slave, port_regs, sa_hi);
2354 GBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2355 GBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2356 GBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2357 GBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2358 GBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2359
2360 /* Initialize EMAC register offsets */
2361 GBE_SET_REG_OFS(slave, emac_regs, mac_control);
2362 GBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2363 GBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
2364
WingMan Kwok9a391c72015-03-20 16:11:25 -04002365 } else if (IS_SS_ID_MU(gbe_dev)) {
2366 /* Initialize slave port register offsets */
2367 GBENU_SET_REG_OFS(slave, port_regs, port_vlan);
2368 GBENU_SET_REG_OFS(slave, port_regs, tx_pri_map);
2369 GBENU_SET_REG_OFS(slave, port_regs, sa_lo);
2370 GBENU_SET_REG_OFS(slave, port_regs, sa_hi);
2371 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl);
2372 GBENU_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2373 GBENU_SET_REG_OFS(slave, port_regs, ts_vlan);
2374 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2375 GBENU_SET_REG_OFS(slave, port_regs, ts_ctl2);
2376 GBENU_SET_REG_OFS(slave, port_regs, rx_maxlen);
2377
2378 /* Initialize EMAC register offsets */
2379 GBENU_SET_REG_OFS(slave, emac_regs, mac_control);
2380 GBENU_SET_REG_OFS(slave, emac_regs, soft_reset);
2381
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002382 } else if (gbe_dev->ss_version == XGBE_SS_VERSION_10) {
2383 /* Initialize slave port register offsets */
2384 XGBE_SET_REG_OFS(slave, port_regs, port_vlan);
2385 XGBE_SET_REG_OFS(slave, port_regs, tx_pri_map);
2386 XGBE_SET_REG_OFS(slave, port_regs, sa_lo);
2387 XGBE_SET_REG_OFS(slave, port_regs, sa_hi);
2388 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl);
2389 XGBE_SET_REG_OFS(slave, port_regs, ts_seq_ltype);
2390 XGBE_SET_REG_OFS(slave, port_regs, ts_vlan);
2391 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl_ltype2);
2392 XGBE_SET_REG_OFS(slave, port_regs, ts_ctl2);
2393
2394 /* Initialize EMAC register offsets */
2395 XGBE_SET_REG_OFS(slave, emac_regs, mac_control);
2396 XGBE_SET_REG_OFS(slave, emac_regs, soft_reset);
2397 XGBE_SET_REG_OFS(slave, emac_regs, rx_maxlen);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002398 }
2399
2400 atomic_set(&slave->link_state, NETCP_LINK_STATE_INVALID);
2401 return 0;
2402}
2403
2404static void init_secondary_ports(struct gbe_priv *gbe_dev,
2405 struct device_node *node)
2406{
2407 struct device *dev = gbe_dev->dev;
2408 phy_interface_t phy_mode;
2409 struct gbe_priv **priv;
2410 struct device_node *port;
2411 struct gbe_slave *slave;
2412 bool mac_phy_link = false;
2413
2414 for_each_child_of_node(node, port) {
2415 slave = devm_kzalloc(dev, sizeof(*slave), GFP_KERNEL);
2416 if (!slave) {
2417 dev_err(dev,
 2418 "memory alloc failed for secondary port(%s), skipping...\n",
2419 port->name);
2420 continue;
2421 }
2422
2423 if (init_slave(gbe_dev, slave, port)) {
2424 dev_err(dev,
2425 "Failed to initialize secondary port(%s), skipping...\n",
2426 port->name);
2427 devm_kfree(dev, slave);
2428 continue;
2429 }
2430
2431 gbe_sgmii_config(gbe_dev, slave);
2432 gbe_port_reset(slave);
2433 gbe_port_config(gbe_dev, slave, gbe_dev->rx_packet_max);
2434 list_add_tail(&slave->slave_list, &gbe_dev->secondary_slaves);
2435 gbe_dev->num_slaves++;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002436 if ((slave->link_interface == SGMII_LINK_MAC_PHY) ||
2437 (slave->link_interface == XGMII_LINK_MAC_PHY))
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002438 mac_phy_link = true;
2439
2440 slave->open = true;
WingMan Kwok9a391c72015-03-20 16:11:25 -04002441 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
2442 break;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002443 }
2444
2445 /* of_phy_connect() is needed only for MAC-PHY interface */
2446 if (!mac_phy_link)
2447 return;
2448
2449 /* Allocate dummy netdev device for attaching to phy device */
2450 gbe_dev->dummy_ndev = alloc_netdev(sizeof(gbe_dev), "dummy",
2451 NET_NAME_UNKNOWN, ether_setup);
2452 if (!gbe_dev->dummy_ndev) {
2453 dev_err(dev,
2454 "Failed to allocate dummy netdev for secondary ports, skipping phy_connect()...\n");
2455 return;
2456 }
2457 priv = netdev_priv(gbe_dev->dummy_ndev);
2458 *priv = gbe_dev;
2459
2460 if (slave->link_interface == SGMII_LINK_MAC_PHY) {
2461 phy_mode = PHY_INTERFACE_MODE_SGMII;
2462 slave->phy_port_t = PORT_MII;
2463 } else {
2464 phy_mode = PHY_INTERFACE_MODE_NA;
2465 slave->phy_port_t = PORT_FIBRE;
2466 }
2467
2468 for_each_sec_slave(slave, gbe_dev) {
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002469 if ((slave->link_interface != SGMII_LINK_MAC_PHY) &&
2470 (slave->link_interface != XGMII_LINK_MAC_PHY))
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002471 continue;
2472 slave->phy =
2473 of_phy_connect(gbe_dev->dummy_ndev,
2474 slave->phy_node,
2475 gbe_adjust_link_sec_slaves,
2476 0, phy_mode);
2477 if (!slave->phy) {
2478 dev_err(dev, "phy not found for slave %d\n",
2479 slave->slave_num);
2480 slave->phy = NULL;
2481 } else {
2482 dev_dbg(dev, "phy found: id is: 0x%s\n",
2483 dev_name(&slave->phy->dev));
2484 phy_start(slave->phy);
2485 phy_read_status(slave->phy);
2486 }
2487 }
2488}
2489
2490static void free_secondary_ports(struct gbe_priv *gbe_dev)
2491{
2492 struct gbe_slave *slave;
2493
2494 for (;;) {
2495 slave = first_sec_slave(gbe_dev);
2496 if (!slave)
2497 break;
2498 if (slave->phy)
2499 phy_disconnect(slave->phy);
2500 list_del(&slave->slave_list);
2501 }
2502 if (gbe_dev->dummy_ndev)
2503 free_netdev(gbe_dev->dummy_ndev);
2504}
2505
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002506static int set_xgbe_ethss10_priv(struct gbe_priv *gbe_dev,
2507 struct device_node *node)
2508{
2509 struct resource res;
2510 void __iomem *regs;
2511 int ret, i;
2512
WingMan Kwok9a391c72015-03-20 16:11:25 -04002513 ret = of_address_to_resource(node, XGBE_SS_REG_INDEX, &res);
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002514 if (ret) {
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002515 dev_err(gbe_dev->dev,
2516 "Can't xlate xgbe of node(%s) ss address at %d\n",
2517 node->name, XGBE_SS_REG_INDEX);
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002518 return ret;
2519 }
2520
2521 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2522 if (IS_ERR(regs)) {
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002523 dev_err(gbe_dev->dev, "Failed to map xgbe ss register base\n");
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002524 return PTR_ERR(regs);
2525 }
2526 gbe_dev->ss_regs = regs;
2527
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002528 ret = of_address_to_resource(node, XGBE_SM_REG_INDEX, &res);
2529 if (ret) {
2530 dev_err(gbe_dev->dev,
2531 "Can't xlate xgbe of node(%s) sm address at %d\n",
2532 node->name, XGBE_SM_REG_INDEX);
2533 return ret;
2534 }
2535
2536 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2537 if (IS_ERR(regs)) {
2538 dev_err(gbe_dev->dev, "Failed to map xgbe sm register base\n");
2539 return PTR_ERR(regs);
2540 }
2541 gbe_dev->switch_regs = regs;
2542
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002543 ret = of_address_to_resource(node, XGBE_SERDES_REG_INDEX, &res);
2544 if (ret) {
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002545 dev_err(gbe_dev->dev,
2546 "Can't xlate xgbe serdes of node(%s) address at %d\n",
2547 node->name, XGBE_SERDES_REG_INDEX);
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002548 return ret;
2549 }
2550
2551 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2552 if (IS_ERR(regs)) {
2553 dev_err(gbe_dev->dev, "Failed to map xgbe serdes register base\n");
2554 return PTR_ERR(regs);
2555 }
2556 gbe_dev->xgbe_serdes_regs = regs;
2557
2558 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
WingMan Kwok9a391c72015-03-20 16:11:25 -04002559 XGBE10_NUM_STAT_ENTRIES *
2560 (gbe_dev->max_num_ports) * sizeof(u64),
2561 GFP_KERNEL);
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002562 if (!gbe_dev->hw_stats) {
2563 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2564 return -ENOMEM;
2565 }
2566
2567 gbe_dev->ss_version = XGBE_SS_VERSION_10;
2568 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs +
2569 XGBE10_SGMII_MODULE_OFFSET;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002570 gbe_dev->host_port_regs = gbe_dev->ss_regs + XGBE10_HOST_PORT_OFFSET;
2571
WingMan Kwok9a391c72015-03-20 16:11:25 -04002572 for (i = 0; i < gbe_dev->max_num_ports; i++)
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002573 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002574 XGBE10_HW_STATS_OFFSET + (GBE_HW_STATS_REG_MAP_SZ * i);
2575
WingMan Kwok9a391c72015-03-20 16:11:25 -04002576 gbe_dev->ale_reg = gbe_dev->switch_regs + XGBE10_ALE_OFFSET;
2577 gbe_dev->ale_ports = gbe_dev->max_num_ports;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002578 gbe_dev->host_port = XGBE10_HOST_PORT_NUM;
2579 gbe_dev->ale_entries = XGBE10_NUM_ALE_ENTRIES;
2580 gbe_dev->et_stats = xgbe10_et_stats;
2581 gbe_dev->num_et_stats = ARRAY_SIZE(xgbe10_et_stats);
WingMan Kwok9a391c72015-03-20 16:11:25 -04002582 gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002583
2584 /* Subsystem registers */
2585 XGBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2586 XGBE_SET_REG_OFS(gbe_dev, ss_regs, control);
2587
2588 /* Switch module registers */
2589 XGBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2590 XGBE_SET_REG_OFS(gbe_dev, switch_regs, control);
2591 XGBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2592 XGBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2593 XGBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
2594
2595 /* Host port registers */
2596 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2597 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
2598 XGBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2599 return 0;
2600}
2601
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002602static int get_gbe_resource_version(struct gbe_priv *gbe_dev,
2603 struct device_node *node)
2604{
2605 struct resource res;
2606 void __iomem *regs;
2607 int ret;
2608
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002609 ret = of_address_to_resource(node, GBE_SS_REG_INDEX, &res);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002610 if (ret) {
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002611 dev_err(gbe_dev->dev,
2612 "Can't translate of node(%s) of gbe ss address at %d\n",
2613 node->name, GBE_SS_REG_INDEX);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002614 return ret;
2615 }
2616
2617 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2618 if (IS_ERR(regs)) {
2619 dev_err(gbe_dev->dev, "Failed to map gbe register base\n");
2620 return PTR_ERR(regs);
2621 }
2622 gbe_dev->ss_regs = regs;
2623 gbe_dev->ss_version = readl(gbe_dev->ss_regs);
2624 return 0;
2625}
2626
2627static int set_gbe_ethss14_priv(struct gbe_priv *gbe_dev,
2628 struct device_node *node)
2629{
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002630 struct resource res;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002631 void __iomem *regs;
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002632 int i, ret;
2633
2634 ret = of_address_to_resource(node, GBE_SGMII34_REG_INDEX, &res);
2635 if (ret) {
2636 dev_err(gbe_dev->dev,
2637 "Can't translate of gbe node(%s) address at index %d\n",
2638 node->name, GBE_SGMII34_REG_INDEX);
2639 return ret;
2640 }
2641
2642 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2643 if (IS_ERR(regs)) {
2644 dev_err(gbe_dev->dev,
2645 "Failed to map gbe sgmii port34 register base\n");
2646 return PTR_ERR(regs);
2647 }
2648 gbe_dev->sgmii_port34_regs = regs;
2649
2650 ret = of_address_to_resource(node, GBE_SM_REG_INDEX, &res);
2651 if (ret) {
2652 dev_err(gbe_dev->dev,
2653 "Can't translate of gbe node(%s) address at index %d\n",
2654 node->name, GBE_SM_REG_INDEX);
2655 return ret;
2656 }
2657
2658 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2659 if (IS_ERR(regs)) {
2660 dev_err(gbe_dev->dev,
2661 "Failed to map gbe switch module register base\n");
2662 return PTR_ERR(regs);
2663 }
2664 gbe_dev->switch_regs = regs;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002665
2666 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2667 GBE13_NUM_HW_STAT_ENTRIES *
WingMan Kwok9a391c72015-03-20 16:11:25 -04002668 gbe_dev->max_num_slaves * sizeof(u64),
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002669 GFP_KERNEL);
2670 if (!gbe_dev->hw_stats) {
2671 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2672 return -ENOMEM;
2673 }
2674
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002675 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBE13_SGMII_MODULE_OFFSET;
2676 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBE13_HOST_PORT_OFFSET;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002677
WingMan Kwok9a391c72015-03-20 16:11:25 -04002678 for (i = 0; i < gbe_dev->max_num_slaves; i++) {
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002679 gbe_dev->hw_stats_regs[i] =
2680 gbe_dev->switch_regs + GBE13_HW_STATS_OFFSET +
2681 (GBE_HW_STATS_REG_MAP_SZ * i);
2682 }
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002683
Karicheri, Muralidharan21e0e0d2015-03-20 16:11:22 -04002684 gbe_dev->ale_reg = gbe_dev->switch_regs + GBE13_ALE_OFFSET;
WingMan Kwok9a391c72015-03-20 16:11:25 -04002685 gbe_dev->ale_ports = gbe_dev->max_num_ports;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002686 gbe_dev->host_port = GBE13_HOST_PORT_NUM;
2687 gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
2688 gbe_dev->et_stats = gbe13_et_stats;
2689 gbe_dev->num_et_stats = ARRAY_SIZE(gbe13_et_stats);
WingMan Kwok9a391c72015-03-20 16:11:25 -04002690 gbe_dev->stats_en_mask = GBE13_REG_VAL_STAT_ENABLE_ALL;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002691
2692 /* Subsystem registers */
2693 GBE_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2694
2695 /* Switch module registers */
2696 GBE_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2697 GBE_SET_REG_OFS(gbe_dev, switch_regs, control);
2698 GBE_SET_REG_OFS(gbe_dev, switch_regs, soft_reset);
2699 GBE_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2700 GBE_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2701 GBE_SET_REG_OFS(gbe_dev, switch_regs, flow_control);
2702
2703 /* Host port registers */
2704 GBE_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2705 GBE_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2706 return 0;
2707}
2708
WingMan Kwok9a391c72015-03-20 16:11:25 -04002709static int set_gbenu_ethss_priv(struct gbe_priv *gbe_dev,
2710 struct device_node *node)
2711{
2712 struct resource res;
2713 void __iomem *regs;
2714 int i, ret;
2715
2716 gbe_dev->hw_stats = devm_kzalloc(gbe_dev->dev,
2717 GBENU_NUM_HW_STAT_ENTRIES *
2718 (gbe_dev->max_num_ports) * sizeof(u64),
2719 GFP_KERNEL);
2720 if (!gbe_dev->hw_stats) {
2721 dev_err(gbe_dev->dev, "hw_stats memory allocation failed\n");
2722 return -ENOMEM;
2723 }
2724
2725 ret = of_address_to_resource(node, GBENU_SM_REG_INDEX, &res);
2726 if (ret) {
2727 dev_err(gbe_dev->dev,
2728 "Can't translate of gbenu node(%s) addr at index %d\n",
2729 node->name, GBENU_SM_REG_INDEX);
2730 return ret;
2731 }
2732
2733 regs = devm_ioremap_resource(gbe_dev->dev, &res);
2734 if (IS_ERR(regs)) {
2735 dev_err(gbe_dev->dev,
2736 "Failed to map gbenu switch module register base\n");
2737 return PTR_ERR(regs);
2738 }
2739 gbe_dev->switch_regs = regs;
2740
2741 gbe_dev->sgmii_port_regs = gbe_dev->ss_regs + GBENU_SGMII_MODULE_OFFSET;
2742 gbe_dev->host_port_regs = gbe_dev->switch_regs + GBENU_HOST_PORT_OFFSET;
2743
2744 for (i = 0; i < (gbe_dev->max_num_ports); i++)
2745 gbe_dev->hw_stats_regs[i] = gbe_dev->switch_regs +
2746 GBENU_HW_STATS_OFFSET + (GBENU_HW_STATS_REG_MAP_SZ * i);
2747
2748 gbe_dev->ale_reg = gbe_dev->switch_regs + GBENU_ALE_OFFSET;
2749 gbe_dev->ale_ports = gbe_dev->max_num_ports;
2750 gbe_dev->host_port = GBENU_HOST_PORT_NUM;
2751 gbe_dev->ale_entries = GBE13_NUM_ALE_ENTRIES;
2752 gbe_dev->et_stats = gbenu_et_stats;
2753 gbe_dev->stats_en_mask = (1 << (gbe_dev->max_num_ports)) - 1;
2754
2755 if (IS_SS_ID_NU(gbe_dev))
2756 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
2757 (gbe_dev->max_num_slaves * GBENU_ET_STATS_PORT_SIZE);
2758 else
2759 gbe_dev->num_et_stats = GBENU_ET_STATS_HOST_SIZE +
2760 GBENU_ET_STATS_PORT_SIZE;
2761
2762 /* Subsystem registers */
2763 GBENU_SET_REG_OFS(gbe_dev, ss_regs, id_ver);
2764
2765 /* Switch module registers */
2766 GBENU_SET_REG_OFS(gbe_dev, switch_regs, id_ver);
2767 GBENU_SET_REG_OFS(gbe_dev, switch_regs, control);
2768 GBENU_SET_REG_OFS(gbe_dev, switch_regs, stat_port_en);
2769 GBENU_SET_REG_OFS(gbe_dev, switch_regs, ptype);
2770
2771 /* Host port registers */
2772 GBENU_SET_REG_OFS(gbe_dev, host_port_regs, port_vlan);
2773 GBENU_SET_REG_OFS(gbe_dev, host_port_regs, rx_maxlen);
2774
2775 /* For NU only. 2U does not need tx_pri_map.
2776 * NU cppi port 0 tx pkt streaming interface has (n-1)*8 egress threads
2777 * while 2U has only 1 such thread
2778 */
2779 GBENU_SET_REG_OFS(gbe_dev, host_port_regs, tx_pri_map);
2780 return 0;
2781}
2782
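/* gbe_probe() parses the ethss device-tree node.  A minimal sketch of the
 * named properties it looks for is shown below; the node name suffix,
 * channel name and queue/interface values are illustrative placeholders
 * only (not taken from a real board file), and the "reg" entries for the
 * subsystem/switch register ranges are omitted:
 *
 *	gbe@90000 {
 *		compatible = "ti,netcp-gbe";
 *		tx-channel = "nettx";
 *		tx-queue = <648>;
 *		enable-ale;
 *		interfaces {
 *			gbe0 {
 *				slave-port = <0>;
 *				link-interface = <1>;
 *			};
 *		};
 *		secondary-slave-ports {
 *		};
 *	};
 */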
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002783static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2784 struct device_node *node, void **inst_priv)
2785{
2786 struct device_node *interfaces, *interface;
2787 struct device_node *secondary_ports;
2788 struct cpsw_ale_params ale_params;
2789 struct gbe_priv *gbe_dev;
2790 u32 slave_num;
2791 int ret = 0;
2792
2793 if (!node) {
2794 dev_err(dev, "device tree info unavailable\n");
2795 return -ENODEV;
2796 }
2797
2798 gbe_dev = devm_kzalloc(dev, sizeof(struct gbe_priv), GFP_KERNEL);
2799 if (!gbe_dev)
2800 return -ENOMEM;
2801
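	/* The compatible string tells how many slave (external) ports this
	 * subsystem instance supports; max_num_ports additionally counts
	 * the host port.
	 */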
WingMan Kwok9a391c72015-03-20 16:11:25 -04002802 if (of_device_is_compatible(node, "ti,netcp-gbe-5") ||
2803 of_device_is_compatible(node, "ti,netcp-gbe")) {
2804 gbe_dev->max_num_slaves = 4;
2805 } else if (of_device_is_compatible(node, "ti,netcp-gbe-9")) {
2806 gbe_dev->max_num_slaves = 8;
2807 } else if (of_device_is_compatible(node, "ti,netcp-gbe-2")) {
2808 gbe_dev->max_num_slaves = 1;
2809 } else if (of_device_is_compatible(node, "ti,netcp-xgbe")) {
2810 gbe_dev->max_num_slaves = 2;
2811 } else {
2812 dev_err(dev, "device tree node for unknown device\n");
2813 return -EINVAL;
2814 }
2815 gbe_dev->max_num_ports = gbe_dev->max_num_slaves + 1;
2816
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002817 gbe_dev->dev = dev;
2818 gbe_dev->netcp_device = netcp_device;
2819 gbe_dev->rx_packet_max = NETCP_MAX_FRAME_SIZE;
2820
2821 /* init the hw stats lock */
2822 spin_lock_init(&gbe_dev->hw_stats_lock);
2823
2824 if (of_find_property(node, "enable-ale", NULL)) {
2825 gbe_dev->enable_ale = true;
2826 dev_info(dev, "ALE enabled\n");
2827 } else {
2828 gbe_dev->enable_ale = false;
 2829 dev_dbg(dev, "ALE bypass enabled\n");
2830 }
2831
2832 ret = of_property_read_u32(node, "tx-queue",
2833 &gbe_dev->tx_queue_id);
2834 if (ret < 0) {
2835 dev_err(dev, "missing tx_queue parameter\n");
2836 gbe_dev->tx_queue_id = GBE_TX_QUEUE;
2837 }
2838
2839 ret = of_property_read_string(node, "tx-channel",
2840 &gbe_dev->dma_chan_name);
2841 if (ret < 0) {
2842 dev_err(dev, "missing \"tx-channel\" parameter\n");
2843 ret = -ENODEV;
2844 goto quit;
2845 }
2846
2847 if (!strcmp(node->name, "gbe")) {
2848 ret = get_gbe_resource_version(gbe_dev, node);
2849 if (ret)
2850 goto quit;
2851
WingMan Kwok9a391c72015-03-20 16:11:25 -04002852 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
2853
2854 if (gbe_dev->ss_version == GBE_SS_VERSION_14)
2855 ret = set_gbe_ethss14_priv(gbe_dev, node);
2856 else if (IS_SS_ID_MU(gbe_dev))
2857 ret = set_gbenu_ethss_priv(gbe_dev, node);
2858 else
2859 ret = -ENODEV;
2860
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002861 if (ret)
2862 goto quit;
Wingman Kwok90cff9e2015-01-15 19:12:52 -05002863 } else if (!strcmp(node->name, "xgbe")) {
2864 ret = set_xgbe_ethss10_priv(gbe_dev, node);
2865 if (ret)
2866 goto quit;
2867 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
2868 gbe_dev->ss_regs);
2869 if (ret)
2870 goto quit;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002871 } else {
2872 dev_err(dev, "unknown GBE node(%s)\n", node->name);
2873 ret = -ENODEV;
2874 goto quit;
2875 }
2876
2877 interfaces = of_get_child_by_name(node, "interfaces");
2878 if (!interfaces)
2879 dev_err(dev, "could not find interfaces\n");
2880
2881 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
2882 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
2883 if (ret)
2884 goto quit;
2885
2886 ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
2887 if (ret)
2888 goto quit;
2889
2890 /* Create network interfaces */
2891 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
2892 for_each_child_of_node(interfaces, interface) {
2893 ret = of_property_read_u32(interface, "slave-port", &slave_num);
2894 if (ret) {
2895 dev_err(dev, "missing slave-port parameter, skipping interface configuration for %s\n",
2896 interface->name);
2897 continue;
2898 }
2899 gbe_dev->num_slaves++;
WingMan Kwok9a391c72015-03-20 16:11:25 -04002900 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
2901 break;
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002902 }
2903
2904 if (!gbe_dev->num_slaves)
2905 dev_warn(dev, "No network interface configured\n");
2906
2907 /* Initialize Secondary slave ports */
2908 secondary_ports = of_get_child_by_name(node, "secondary-slave-ports");
2909 INIT_LIST_HEAD(&gbe_dev->secondary_slaves);
WingMan Kwok9a391c72015-03-20 16:11:25 -04002910 if (secondary_ports && (gbe_dev->num_slaves < gbe_dev->max_num_slaves))
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002911 init_secondary_ports(gbe_dev, secondary_ports);
2912 of_node_put(secondary_ports);
2913
2914 if (!gbe_dev->num_slaves) {
2915 dev_err(dev, "No network interface or secondary ports configured\n");
2916 ret = -ENODEV;
2917 goto quit;
2918 }
2919
2920 memset(&ale_params, 0, sizeof(ale_params));
2921 ale_params.dev = gbe_dev->dev;
2922 ale_params.ale_regs = gbe_dev->ale_reg;
2923 ale_params.ale_ageout = GBE_DEFAULT_ALE_AGEOUT;
2924 ale_params.ale_entries = gbe_dev->ale_entries;
2925 ale_params.ale_ports = gbe_dev->ale_ports;
2926
2927 gbe_dev->ale = cpsw_ale_create(&ale_params);
2928 if (!gbe_dev->ale) {
2929 dev_err(gbe_dev->dev, "error initializing ale engine\n");
2930 ret = -ENODEV;
2931 goto quit;
2932 } else {
2933 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
2934 }
2935
2936 /* initialize host port */
2937 gbe_init_host_port(gbe_dev);
2938
2939 init_timer(&gbe_dev->timer);
2940 gbe_dev->timer.data = (unsigned long)gbe_dev;
2941 gbe_dev->timer.function = netcp_ethss_timer;
2942 gbe_dev->timer.expires = jiffies + GBE_TIMER_INTERVAL;
2943 add_timer(&gbe_dev->timer);
2944 *inst_priv = gbe_dev;
2945 return 0;
2946
2947quit:
2948 if (gbe_dev->hw_stats)
2949 devm_kfree(dev, gbe_dev->hw_stats);
Markus Elfring9b556692015-02-03 20:12:25 +01002950 cpsw_ale_destroy(gbe_dev->ale);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002951 if (gbe_dev->ss_regs)
2952 devm_iounmap(dev, gbe_dev->ss_regs);
Markus Elfring9b556692015-02-03 20:12:25 +01002953 of_node_put(interfaces);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05002954 devm_kfree(dev, gbe_dev);
2955 return ret;
2956}
2957
2958static int gbe_attach(void *inst_priv, struct net_device *ndev,
2959 struct device_node *node, void **intf_priv)
2960{
2961 struct gbe_priv *gbe_dev = inst_priv;
2962 struct gbe_intf *gbe_intf;
2963 int ret;
2964
2965 if (!node) {
2966 dev_err(gbe_dev->dev, "interface node not available\n");
2967 return -ENODEV;
2968 }
2969
2970 gbe_intf = devm_kzalloc(gbe_dev->dev, sizeof(*gbe_intf), GFP_KERNEL);
2971 if (!gbe_intf)
2972 return -ENOMEM;
2973
2974 gbe_intf->ndev = ndev;
2975 gbe_intf->dev = gbe_dev->dev;
2976 gbe_intf->gbe_dev = gbe_dev;
2977
2978 gbe_intf->slave = devm_kzalloc(gbe_dev->dev,
2979 sizeof(*gbe_intf->slave),
2980 GFP_KERNEL);
2981 if (!gbe_intf->slave) {
2982 ret = -ENOMEM;
2983 goto fail;
2984 }
2985
2986 if (init_slave(gbe_dev, gbe_intf->slave, node)) {
2987 ret = -ENODEV;
2988 goto fail;
2989 }
2990
2991 gbe_intf->tx_pipe = gbe_dev->tx_pipe;
2992 ndev->ethtool_ops = &keystone_ethtool_ops;
2993 list_add_tail(&gbe_intf->gbe_intf_list, &gbe_dev->gbe_intf_head);
2994 *intf_priv = gbe_intf;
2995 return 0;
2996
2997fail:
2998 if (gbe_intf->slave)
2999 devm_kfree(gbe_dev->dev, gbe_intf->slave);
3000 if (gbe_intf)
3001 devm_kfree(gbe_dev->dev, gbe_intf);
3002 return ret;
3003}
3004
3005static int gbe_release(void *intf_priv)
3006{
3007 struct gbe_intf *gbe_intf = intf_priv;
3008
3009 gbe_intf->ndev->ethtool_ops = NULL;
3010 list_del(&gbe_intf->gbe_intf_list);
3011 devm_kfree(gbe_intf->dev, gbe_intf->slave);
3012 devm_kfree(gbe_intf->dev, gbe_intf);
3013 return 0;
3014}
3015
3016static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3017{
3018 struct gbe_priv *gbe_dev = inst_priv;
3019
3020 del_timer_sync(&gbe_dev->timer);
3021 cpsw_ale_stop(gbe_dev->ale);
3022 cpsw_ale_destroy(gbe_dev->ale);
3023 netcp_txpipe_close(&gbe_dev->tx_pipe);
3024 free_secondary_ports(gbe_dev);
3025
3026 if (!list_empty(&gbe_dev->gbe_intf_head))
3027 dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n");
3028
3029 devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
3030 devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
3031 memset(gbe_dev, 0x00, sizeof(*gbe_dev));
3032 devm_kfree(gbe_dev->dev, gbe_dev);
3033 return 0;
3034}
3035
3036static struct netcp_module gbe_module = {
3037 .name = GBE_MODULE_NAME,
3038 .owner = THIS_MODULE,
3039 .primary = true,
3040 .probe = gbe_probe,
3041 .open = gbe_open,
3042 .close = gbe_close,
3043 .remove = gbe_remove,
3044 .attach = gbe_attach,
3045 .release = gbe_release,
3046 .add_addr = gbe_add_addr,
3047 .del_addr = gbe_del_addr,
3048 .add_vid = gbe_add_vid,
3049 .del_vid = gbe_del_vid,
3050 .ioctl = gbe_ioctl,
3051};
3052
Wingman Kwok90cff9e2015-01-15 19:12:52 -05003053static struct netcp_module xgbe_module = {
3054 .name = XGBE_MODULE_NAME,
3055 .owner = THIS_MODULE,
3056 .primary = true,
3057 .probe = gbe_probe,
3058 .open = gbe_open,
3059 .close = gbe_close,
3060 .remove = gbe_remove,
3061 .attach = gbe_attach,
3062 .release = gbe_release,
3063 .add_addr = gbe_add_addr,
3064 .del_addr = gbe_del_addr,
3065 .add_vid = gbe_add_vid,
3066 .del_vid = gbe_del_vid,
3067 .ioctl = gbe_ioctl,
3068};
3069
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05003070static int __init keystone_gbe_init(void)
3071{
3072 int ret;
3073
3074 ret = netcp_register_module(&gbe_module);
3075 if (ret)
3076 return ret;
3077
Wingman Kwok90cff9e2015-01-15 19:12:52 -05003078 ret = netcp_register_module(&xgbe_module);
3079 if (ret)
3080 return ret;
3081
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05003082 return 0;
3083}
3084module_init(keystone_gbe_init);
3085
3086static void __exit keystone_gbe_exit(void)
3087{
3088 netcp_unregister_module(&gbe_module);
Wingman Kwok90cff9e2015-01-15 19:12:52 -05003089 netcp_unregister_module(&xgbe_module);
Wingman Kwok6f8d3f32015-01-15 19:12:51 -05003090}
3091module_exit(keystone_gbe_exit);
Karicheri, Muralidharan58c11b52015-01-29 18:15:51 -05003092
3093MODULE_LICENSE("GPL v2");
3094MODULE_DESCRIPTION("TI NETCP ETHSS driver for Keystone SOCs");
3095MODULE_AUTHOR("Sandeep Nair <sandeep_n@ti.com>");