// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2017 Broadcom
 */

#include "bcm-phy-lib.h"
#include <linux/bitfield.h>
#include <linux/brcmphy.h>
#include <linux/export.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/ethtool.h>
#include <linux/ethtool_netlink.h>

#define MII_BCM_CHANNEL_WIDTH	0x2000
#define BCM_CL45VEN_EEE_ADV	0x3c

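/* The expansion registers are accessed indirectly: the register number is
 * written to MII_BCM54XX_EXP_SEL and the data is transferred through
 * MII_BCM54XX_EXP_DATA.  The __-prefixed helpers expect the caller to hold
 * the MDIO bus lock; the plain variants take it themselves.
 */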
int __bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
{
	int rc;

	rc = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
	if (rc < 0)
		return rc;

	return __phy_write(phydev, MII_BCM54XX_EXP_DATA, val);
}
EXPORT_SYMBOL_GPL(__bcm_phy_write_exp);

int bcm_phy_write_exp(struct phy_device *phydev, u16 reg, u16 val)
{
	int rc;

	phy_lock_mdio_bus(phydev);
	rc = __bcm_phy_write_exp(phydev, reg, val);
	phy_unlock_mdio_bus(phydev);

	return rc;
}
EXPORT_SYMBOL_GPL(bcm_phy_write_exp);

int __bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
{
	int val;

	val = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
	if (val < 0)
		return val;

	val = __phy_read(phydev, MII_BCM54XX_EXP_DATA);

	/* Restore default value. It's O.K. if this write fails. */
	__phy_write(phydev, MII_BCM54XX_EXP_SEL, 0);

	return val;
}
EXPORT_SYMBOL_GPL(__bcm_phy_read_exp);

int bcm_phy_read_exp(struct phy_device *phydev, u16 reg)
{
	int rc;

	phy_lock_mdio_bus(phydev);
	rc = __bcm_phy_read_exp(phydev, reg);
	phy_unlock_mdio_bus(phydev);

	return rc;
}
EXPORT_SYMBOL_GPL(bcm_phy_read_exp);

int __bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set)
{
	int new, ret;

	ret = __phy_write(phydev, MII_BCM54XX_EXP_SEL, reg);
	if (ret < 0)
		return ret;

	ret = __phy_read(phydev, MII_BCM54XX_EXP_DATA);
	if (ret < 0)
		return ret;

	new = (ret & ~mask) | set;
	if (new == ret)
		return 0;

	return __phy_write(phydev, MII_BCM54XX_EXP_DATA, new);
}
EXPORT_SYMBOL_GPL(__bcm_phy_modify_exp);

int bcm_phy_modify_exp(struct phy_device *phydev, u16 reg, u16 mask, u16 set)
{
	int ret;

	phy_lock_mdio_bus(phydev);
	ret = __bcm_phy_modify_exp(phydev, reg, mask, set);
	phy_unlock_mdio_bus(phydev);

	return ret;
}
EXPORT_SYMBOL_GPL(bcm_phy_modify_exp);

int bcm54xx_auxctl_read(struct phy_device *phydev, u16 regnum)
{
	/* The register must be written to both the Shadow Register Select and
	 * the Shadow Read Register Selector
	 */
	phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MASK |
		  regnum << MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT);
	return phy_read(phydev, MII_BCM54XX_AUX_CTL);
}
EXPORT_SYMBOL_GPL(bcm54xx_auxctl_read);

int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val)
{
	return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
}
EXPORT_SYMBOL(bcm54xx_auxctl_write);

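/* The "misc" registers sit behind the expansion register space: select the
 * MISC shadow of the auxiliary control register, enable the SMDSP clock,
 * then access the expansion register at (chl * MII_BCM_CHANNEL_WIDTH) | reg.
 */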
int bcm_phy_write_misc(struct phy_device *phydev,
		       u16 reg, u16 chl, u16 val)
{
	int rc;
	int tmp;

	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL,
		       MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
	if (rc < 0)
		return rc;

	tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
	tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
	if (rc < 0)
		return rc;

	tmp = (chl * MII_BCM_CHANNEL_WIDTH) | reg;
	rc = bcm_phy_write_exp(phydev, tmp, val);

	return rc;
}
EXPORT_SYMBOL_GPL(bcm_phy_write_misc);

int bcm_phy_read_misc(struct phy_device *phydev,
		      u16 reg, u16 chl)
{
	int rc;
	int tmp;

	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL,
		       MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
	if (rc < 0)
		return rc;

	tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
	tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
	rc = phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
	if (rc < 0)
		return rc;

	tmp = (chl * MII_BCM_CHANNEL_WIDTH) | reg;
	rc = bcm_phy_read_exp(phydev, tmp);

	return rc;
}
EXPORT_SYMBOL_GPL(bcm_phy_read_misc);

int bcm_phy_ack_intr(struct phy_device *phydev)
{
	int reg;

	/* Clear pending interrupts. */
	reg = phy_read(phydev, MII_BCM54XX_ISR);
	if (reg < 0)
		return reg;

	return 0;
}
EXPORT_SYMBOL_GPL(bcm_phy_ack_intr);

int bcm_phy_config_intr(struct phy_device *phydev)
{
	int reg, err;

	reg = phy_read(phydev, MII_BCM54XX_ECR);
	if (reg < 0)
		return reg;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		err = bcm_phy_ack_intr(phydev);
		if (err)
			return err;

		reg &= ~MII_BCM54XX_ECR_IM;
		err = phy_write(phydev, MII_BCM54XX_ECR, reg);
	} else {
		reg |= MII_BCM54XX_ECR_IM;
		err = phy_write(phydev, MII_BCM54XX_ECR, reg);
		if (err)
			return err;

		err = bcm_phy_ack_intr(phydev);
	}
	return err;
}
EXPORT_SYMBOL_GPL(bcm_phy_config_intr);

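/* Read the interrupt status (which also acks pending events) and mask it
 * against the inverted IMR; the PHY state machine is only kicked when an
 * enabled source actually fired.
 */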
irqreturn_t bcm_phy_handle_interrupt(struct phy_device *phydev)
{
	int irq_status, irq_mask;

	irq_status = phy_read(phydev, MII_BCM54XX_ISR);
	if (irq_status < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}

	/* If a bit from the Interrupt Mask register is set, the corresponding
	 * bit from the Interrupt Status register is masked. So read the IMR
	 * and then flip the bits to get the list of possible interrupt
	 * sources.
	 */
	irq_mask = phy_read(phydev, MII_BCM54XX_IMR);
	if (irq_mask < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}
	irq_mask = ~irq_mask;

	if (!(irq_status & irq_mask))
		return IRQ_NONE;

	phy_trigger_machine(phydev);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(bcm_phy_handle_interrupt);

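/* Shadow register access: the shadow selector and its data share the
 * MII_BCM54XX_SHD register, so a read first programs the selector and a
 * write carries the selector, the data and the write-enable bit in one go.
 */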
int bcm_phy_read_shadow(struct phy_device *phydev, u16 shadow)
{
	phy_write(phydev, MII_BCM54XX_SHD, MII_BCM54XX_SHD_VAL(shadow));
	return MII_BCM54XX_SHD_DATA(phy_read(phydev, MII_BCM54XX_SHD));
}
EXPORT_SYMBOL_GPL(bcm_phy_read_shadow);

int bcm_phy_write_shadow(struct phy_device *phydev, u16 shadow,
			 u16 val)
{
	return phy_write(phydev, MII_BCM54XX_SHD,
			 MII_BCM54XX_SHD_WRITE |
			 MII_BCM54XX_SHD_VAL(shadow) |
			 MII_BCM54XX_SHD_DATA(val));
}
EXPORT_SYMBOL_GPL(bcm_phy_write_shadow);

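/* RDB registers are reached through an address/data pair: the RDB register
 * number goes into MII_BCM54XX_RDB_ADDR and the payload moves through
 * MII_BCM54XX_RDB_DATA.  As with the expansion helpers, the __-prefixed
 * variants require the caller to hold the MDIO bus lock.
 */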
int __bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb)
{
	int val;

	val = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
	if (val < 0)
		return val;

	return __phy_read(phydev, MII_BCM54XX_RDB_DATA);
}
EXPORT_SYMBOL_GPL(__bcm_phy_read_rdb);

int bcm_phy_read_rdb(struct phy_device *phydev, u16 rdb)
{
	int ret;

	phy_lock_mdio_bus(phydev);
	ret = __bcm_phy_read_rdb(phydev, rdb);
	phy_unlock_mdio_bus(phydev);

	return ret;
}
EXPORT_SYMBOL_GPL(bcm_phy_read_rdb);

int __bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val)
{
	int ret;

	ret = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
	if (ret < 0)
		return ret;

	return __phy_write(phydev, MII_BCM54XX_RDB_DATA, val);
}
EXPORT_SYMBOL_GPL(__bcm_phy_write_rdb);

int bcm_phy_write_rdb(struct phy_device *phydev, u16 rdb, u16 val)
{
	int ret;

	phy_lock_mdio_bus(phydev);
	ret = __bcm_phy_write_rdb(phydev, rdb, val);
	phy_unlock_mdio_bus(phydev);

	return ret;
}
EXPORT_SYMBOL_GPL(bcm_phy_write_rdb);

int __bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask, u16 set)
{
	int new, ret;

	ret = __phy_write(phydev, MII_BCM54XX_RDB_ADDR, rdb);
	if (ret < 0)
		return ret;

	ret = __phy_read(phydev, MII_BCM54XX_RDB_DATA);
	if (ret < 0)
		return ret;

	new = (ret & ~mask) | set;
	if (new == ret)
		return 0;

	return __phy_write(phydev, MII_BCM54XX_RDB_DATA, new);
}
EXPORT_SYMBOL_GPL(__bcm_phy_modify_rdb);

int bcm_phy_modify_rdb(struct phy_device *phydev, u16 rdb, u16 mask, u16 set)
{
	int ret;

	phy_lock_mdio_bus(phydev);
	ret = __bcm_phy_modify_rdb(phydev, rdb, mask, set);
	phy_unlock_mdio_bus(phydev);

	return ret;
}
EXPORT_SYMBOL_GPL(bcm_phy_modify_rdb);

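/* Program Auto Power-Down through the APD shadow register: pick the autoneg
 * or no-autoneg APD enable bit, always enable energy-detect single link
 * pulse, and adjust the DLL APD behaviour in SCR3 first when dll_pwr_down
 * is requested.
 */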
int bcm_phy_enable_apd(struct phy_device *phydev, bool dll_pwr_down)
{
	int val;

	if (dll_pwr_down) {
		val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR3);
		if (val < 0)
			return val;

		val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
		bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR3, val);
	}

	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_APD);
	if (val < 0)
		return val;

	/* Clear APD bits */
	val &= BCM_APD_CLR_MASK;

	if (phydev->autoneg == AUTONEG_ENABLE)
		val |= BCM54XX_SHD_APD_EN;
	else
		val |= BCM_NO_ANEG_APD_EN;

	/* Enable energy detect single link pulse for easy wakeup */
	val |= BCM_APD_SINGLELP_EN;

	/* Enable Auto Power-Down (APD) for the PHY */
	return bcm_phy_write_shadow(phydev, BCM54XX_SHD_APD, val);
}
EXPORT_SYMBOL_GPL(bcm_phy_enable_apd);

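/* Toggle Energy Efficient Ethernet: flip the LPI feature bits in the
 * Clause 45 vendor EEE control register, then advertise (or withdraw) the
 * EEE modes matching the PHY's supported 100/1000BASE-T speeds.
 */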
int bcm_phy_set_eee(struct phy_device *phydev, bool enable)
{
	int val, mask = 0;

	/* Enable EEE at PHY level */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL);
	if (val < 0)
		return val;

	if (enable)
		val |= LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X;
	else
		val &= ~(LPI_FEATURE_EN | LPI_FEATURE_EN_DIG1000X);

	phy_write_mmd(phydev, MDIO_MMD_AN, BRCM_CL45VEN_EEE_CONTROL, (u32)val);

	/* Advertise EEE */
	val = phy_read_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV);
	if (val < 0)
		return val;

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			      phydev->supported))
		mask |= MDIO_EEE_1000T;
	if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT,
			      phydev->supported))
		mask |= MDIO_EEE_100TX;

	if (enable)
		val |= mask;
	else
		val &= ~mask;

	phy_write_mmd(phydev, MDIO_MMD_AN, BCM_CL45VEN_EEE_ADV, (u32)val);

	return 0;
}
EXPORT_SYMBOL_GPL(bcm_phy_set_eee);

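/* Downshift ("wirespeed") handling: when enabled, the PHY falls back to a
 * lower speed after a number of failed link attempts.  The retry limit
 * lives in the SCR2 shadow register, with a dedicated bit for downshifting
 * after a single attempt.
 */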
int bcm_phy_downshift_get(struct phy_device *phydev, u8 *count)
{
	int val;

	val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
	if (val < 0)
		return val;

	/* Check if wirespeed is enabled or not */
	if (!(val & MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN)) {
		*count = DOWNSHIFT_DEV_DISABLE;
		return 0;
	}

	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR2);
	if (val < 0)
		return val;

	/* Downgrade after one link attempt */
	if (val & BCM54XX_SHD_SCR2_WSPD_RTRY_DIS) {
		*count = 1;
	} else {
		/* Downgrade after configured retry count */
		val >>= BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT;
		val &= BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK;
		*count = val + BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(bcm_phy_downshift_get);

int bcm_phy_downshift_set(struct phy_device *phydev, u8 count)
{
	int val = 0, ret = 0;

	/* Range check the number given */
	if (count - BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET >
	    BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK &&
	    count != DOWNSHIFT_DEV_DEFAULT_COUNT) {
		return -ERANGE;
	}

	val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
	if (val < 0)
		return val;

	/* Set the write enable bit */
	val |= MII_BCM54XX_AUXCTL_MISC_WREN;

	if (count == DOWNSHIFT_DEV_DISABLE) {
		val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN;
		return bcm54xx_auxctl_write(phydev,
					    MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
					    val);
	} else {
		val |= MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN;
		ret = bcm54xx_auxctl_write(phydev,
					   MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
					   val);
		if (ret < 0)
			return ret;
	}

	val = bcm_phy_read_shadow(phydev, BCM54XX_SHD_SCR2);
	val &= ~(BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_MASK <<
		 BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT |
		 BCM54XX_SHD_SCR2_WSPD_RTRY_DIS);

	switch (count) {
	case 1:
		val |= BCM54XX_SHD_SCR2_WSPD_RTRY_DIS;
		break;
	case DOWNSHIFT_DEV_DEFAULT_COUNT:
		val |= 1 << BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT;
		break;
	default:
		val |= (count - BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_OFFSET) <<
			BCM54XX_SHD_SCR2_WSPD_RTRY_LMT_SHIFT;
		break;
	}

	return bcm_phy_write_shadow(phydev, BCM54XX_SHD_SCR2, val);
}
EXPORT_SYMBOL_GPL(bcm_phy_downshift_set);

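/* Each hardware statistic is a bit field of 'bits' width at 'shift' inside
 * one of the MII_BRCM_CORE_BASE1x registers; reads are accumulated into a
 * caller-provided shadow array and exposed as 64-bit ethtool counters.
 */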
struct bcm_phy_hw_stat {
	const char *string;
	u8 reg;
	u8 shift;
	u8 bits;
};

/* Counters freeze at either 0xffff or 0xff, better than nothing */
static const struct bcm_phy_hw_stat bcm_phy_hw_stats[] = {
	{ "phy_receive_errors", MII_BRCM_CORE_BASE12, 0, 16 },
	{ "phy_serdes_ber_errors", MII_BRCM_CORE_BASE13, 8, 8 },
	{ "phy_false_carrier_sense_errors", MII_BRCM_CORE_BASE13, 0, 8 },
	{ "phy_local_rcvr_nok", MII_BRCM_CORE_BASE14, 8, 8 },
	{ "phy_remote_rcv_nok", MII_BRCM_CORE_BASE14, 0, 8 },
};

int bcm_phy_get_sset_count(struct phy_device *phydev)
{
	return ARRAY_SIZE(bcm_phy_hw_stats);
}
EXPORT_SYMBOL_GPL(bcm_phy_get_sset_count);

void bcm_phy_get_strings(struct phy_device *phydev, u8 *data)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
		strlcpy(data + i * ETH_GSTRING_LEN,
			bcm_phy_hw_stats[i].string, ETH_GSTRING_LEN);
}
EXPORT_SYMBOL_GPL(bcm_phy_get_strings);

/* Caller is supposed to provide appropriate storage for the library code to
 * access the shadow copy
 */
static u64 bcm_phy_get_stat(struct phy_device *phydev, u64 *shadow,
			    unsigned int i)
{
	struct bcm_phy_hw_stat stat = bcm_phy_hw_stats[i];
	int val;
	u64 ret;

	val = phy_read(phydev, stat.reg);
	if (val < 0) {
		ret = U64_MAX;
	} else {
		val >>= stat.shift;
		val = val & ((1 << stat.bits) - 1);
		shadow[i] += val;
		ret = shadow[i];
	}

	return ret;
}

void bcm_phy_get_stats(struct phy_device *phydev, u64 *shadow,
		       struct ethtool_stats *stats, u64 *data)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(bcm_phy_hw_stats); i++)
		data[i] = bcm_phy_get_stat(phydev, shadow, i);
}
EXPORT_SYMBOL_GPL(bcm_phy_get_stats);

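/* Pulse the R_CAL/RC_CAL calibration engine reset through expansion
 * register 0x00b0: assert the reset bit, then release it.
 */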
void bcm_phy_r_rc_cal_reset(struct phy_device *phydev)
{
	/* Reset R_CAL/RC_CAL Engine */
	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0010);

	/* Disable Reset R_CAL/RC_CAL Engine */
	bcm_phy_write_exp_sel(phydev, 0x00b0, 0x0000);
}
EXPORT_SYMBOL_GPL(bcm_phy_r_rc_cal_reset);

int bcm_phy_28nm_a0b0_afe_config_init(struct phy_device *phydev)
{
	/* Increase VCO range to prevent unlocking problem of PLL at low
	 * temp
	 */
	bcm_phy_write_misc(phydev, PLL_PLLCTRL_1, 0x0048);

	/* Change Ki to 011 */
	bcm_phy_write_misc(phydev, PLL_PLLCTRL_2, 0x021b);

	/* Disable loading of TVCO buffer to bandgap, set bandgap trim
	 * to 111
	 */
	bcm_phy_write_misc(phydev, PLL_PLLCTRL_4, 0x0e20);

	/* Adjust bias current trim by -3 */
	bcm_phy_write_misc(phydev, DSP_TAP10, 0x690b);

	/* Switch to CORE_BASE1E */
	phy_write(phydev, MII_BRCM_CORE_BASE1E, 0xd);

	bcm_phy_r_rc_cal_reset(phydev);

	/* write AFE_RXCONFIG_0 */
	bcm_phy_write_misc(phydev, AFE_RXCONFIG_0, 0xeb19);

	/* write AFE_RXCONFIG_1 */
	bcm_phy_write_misc(phydev, AFE_RXCONFIG_1, 0x9a3f);

	/* write AFE_RX_LP_COUNTER */
	bcm_phy_write_misc(phydev, AFE_RX_LP_COUNTER, 0x7fc0);

	/* write AFE_HPF_TRIM_OTHERS */
	bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x000b);

	/* write AFE_TX_CONFIG */
	bcm_phy_write_misc(phydev, AFE_TX_CONFIG, 0x0800);

	return 0;
}
EXPORT_SYMBOL_GPL(bcm_phy_28nm_a0b0_afe_config_init);

int bcm_phy_enable_jumbo(struct phy_device *phydev)
{
	int ret;

	ret = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL);
	if (ret < 0)
		return ret;

	/* Enable extended length packet reception */
	ret = bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
				   ret | MII_BCM54XX_AUXCTL_ACTL_EXT_PKT_LEN);
	if (ret < 0)
		return ret;

	/* Enable the elastic FIFO for raising the transmission limit from
	 * 4.5KB to 10KB, at the expense of an additional 16 ns in propagation
	 * latency.
	 */
	return phy_set_bits(phydev, MII_BCM54XX_ECR, MII_BCM54XX_ECR_FIFOE);
}
EXPORT_SYMBOL_GPL(bcm_phy_enable_jumbo);

static int __bcm_phy_enable_rdb_access(struct phy_device *phydev)
{
	return __bcm_phy_write_exp(phydev, BCM54XX_EXP_REG7E, 0);
}

static int __bcm_phy_enable_legacy_access(struct phy_device *phydev)
{
	return __bcm_phy_write_rdb(phydev, BCM54XX_RDB_REG0087,
				   BCM54XX_ACCESS_MODE_LEGACY_EN);
}

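/* Kick off an ECD cable diagnostics run: enable autoneg with no capabilities
 * advertised (required for the diagnostics), temporarily switch RDB-only
 * PHYs to legacy register access, and start the measurement with results
 * reported in centimetre units.
 */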
static int _bcm_phy_cable_test_start(struct phy_device *phydev, bool is_rdb)
{
	u16 mask, set;
	int ret;

	/* Auto-negotiation must be enabled for cable diagnostics to work, but
	 * don't advertise any capabilities.
	 */
	phy_write(phydev, MII_BMCR, BMCR_ANENABLE);
	phy_write(phydev, MII_ADVERTISE, ADVERTISE_CSMA);
	phy_write(phydev, MII_CTRL1000, 0);

	phy_lock_mdio_bus(phydev);
	if (is_rdb) {
		ret = __bcm_phy_enable_legacy_access(phydev);
		if (ret)
			goto out;
	}

	mask = BCM54XX_ECD_CTRL_CROSS_SHORT_DIS | BCM54XX_ECD_CTRL_UNIT_MASK;
	set = BCM54XX_ECD_CTRL_RUN | BCM54XX_ECD_CTRL_BREAK_LINK |
	      FIELD_PREP(BCM54XX_ECD_CTRL_UNIT_MASK,
			 BCM54XX_ECD_CTRL_UNIT_CM);

	ret = __bcm_phy_modify_exp(phydev, BCM54XX_EXP_ECD_CTRL, mask, set);

out:
	/* re-enable the RDB access even if there was an error */
	if (is_rdb)
		ret = __bcm_phy_enable_rdb_access(phydev) ? : ret;

	phy_unlock_mdio_bus(phydev);

	return ret;
}

static int bcm_phy_cable_test_report_trans(int result)
{
	switch (result) {
	case BCM54XX_ECD_FAULT_TYPE_OK:
		return ETHTOOL_A_CABLE_RESULT_CODE_OK;
	case BCM54XX_ECD_FAULT_TYPE_OPEN:
		return ETHTOOL_A_CABLE_RESULT_CODE_OPEN;
	case BCM54XX_ECD_FAULT_TYPE_SAME_SHORT:
		return ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT;
	case BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT:
		return ETHTOOL_A_CABLE_RESULT_CODE_CROSS_SHORT;
	case BCM54XX_ECD_FAULT_TYPE_INVALID:
	case BCM54XX_ECD_FAULT_TYPE_BUSY:
	default:
		return ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC;
	}
}

static bool bcm_phy_distance_valid(int result)
{
	switch (result) {
	case BCM54XX_ECD_FAULT_TYPE_OPEN:
	case BCM54XX_ECD_FAULT_TYPE_SAME_SHORT:
	case BCM54XX_ECD_FAULT_TYPE_CROSS_SHORT:
		return true;
	}
	return false;
}

static int bcm_phy_report_length(struct phy_device *phydev, int pair)
{
	int val;

	val = __bcm_phy_read_exp(phydev,
				 BCM54XX_EXP_ECD_PAIR_A_LENGTH_RESULTS + pair);
	if (val < 0)
		return val;

	if (val == BCM54XX_ECD_LENGTH_RESULTS_INVALID)
		return 0;

	ethnl_cable_test_fault_length(phydev, pair, val);

	return 0;
}

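/* Poll the diagnostics: return early while ECD_CTRL still reports the run
 * as in progress; once done, translate the per-pair fault codes into
 * ethtool result codes and report a fault length for every pair that has a
 * valid distance.
 */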
static int _bcm_phy_cable_test_get_status(struct phy_device *phydev,
					  bool *finished, bool is_rdb)
{
	int pair_a, pair_b, pair_c, pair_d, ret;

	*finished = false;

	phy_lock_mdio_bus(phydev);

	if (is_rdb) {
		ret = __bcm_phy_enable_legacy_access(phydev);
		if (ret)
			goto out;
	}

	ret = __bcm_phy_read_exp(phydev, BCM54XX_EXP_ECD_CTRL);
	if (ret < 0)
		goto out;

	if (ret & BCM54XX_ECD_CTRL_IN_PROGRESS) {
		ret = 0;
		goto out;
	}

	ret = __bcm_phy_read_exp(phydev, BCM54XX_EXP_ECD_FAULT_TYPE);
	if (ret < 0)
		goto out;

	pair_a = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_A_MASK, ret);
	pair_b = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_B_MASK, ret);
	pair_c = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_C_MASK, ret);
	pair_d = FIELD_GET(BCM54XX_ECD_FAULT_TYPE_PAIR_D_MASK, ret);

	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A,
				bcm_phy_cable_test_report_trans(pair_a));
	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_B,
				bcm_phy_cable_test_report_trans(pair_b));
	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_C,
				bcm_phy_cable_test_report_trans(pair_c));
	ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_D,
				bcm_phy_cable_test_report_trans(pair_d));

	if (bcm_phy_distance_valid(pair_a))
		bcm_phy_report_length(phydev, 0);
	if (bcm_phy_distance_valid(pair_b))
		bcm_phy_report_length(phydev, 1);
	if (bcm_phy_distance_valid(pair_c))
		bcm_phy_report_length(phydev, 2);
	if (bcm_phy_distance_valid(pair_d))
		bcm_phy_report_length(phydev, 3);

	ret = 0;
	*finished = true;
out:
	/* re-enable the RDB access even if there was an error */
	if (is_rdb)
		ret = __bcm_phy_enable_rdb_access(phydev) ? : ret;

	phy_unlock_mdio_bus(phydev);

	return ret;
}

int bcm_phy_cable_test_start(struct phy_device *phydev)
{
	return _bcm_phy_cable_test_start(phydev, false);
}
EXPORT_SYMBOL_GPL(bcm_phy_cable_test_start);

int bcm_phy_cable_test_get_status(struct phy_device *phydev, bool *finished)
{
	return _bcm_phy_cable_test_get_status(phydev, finished, false);
}
EXPORT_SYMBOL_GPL(bcm_phy_cable_test_get_status);

/* We assume that all PHYs which support RDB access can be switched to legacy
 * mode. If, in the future, this is not true anymore, we have to re-implement
 * this with RDB access.
 */
int bcm_phy_cable_test_start_rdb(struct phy_device *phydev)
{
	return _bcm_phy_cable_test_start(phydev, true);
}
EXPORT_SYMBOL_GPL(bcm_phy_cable_test_start_rdb);

int bcm_phy_cable_test_get_status_rdb(struct phy_device *phydev,
				      bool *finished)
{
	return _bcm_phy_cable_test_get_status(phydev, finished, true);
}
EXPORT_SYMBOL_GPL(bcm_phy_cable_test_get_status_rdb);

MODULE_DESCRIPTION("Broadcom PHY Library");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Broadcom Corporation");