/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/seq_file.h>
#include <net/dsa.h>
#include "mv88e6xxx.h"

/* MDIO bus access can be nested in the case of PHYs connected to the
 * internal MDIO bus of the switch, which is accessed via MDIO bus of
 * the Ethernet interface. Avoid lockdep false positives by using
 * mutex_lock_nested().
 */
static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
	int ret;

	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
	ret = bus->read(bus, addr, regnum);
	mutex_unlock(&bus->mdio_lock);

	return ret;
}

static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
				   u16 val)
{
	int ret;

	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
	ret = bus->write(bus, addr, regnum, val);
	mutex_unlock(&bus->mdio_lock);

	return ret;
}

/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
 * will be directly accessible on some {device address,register address}
 * pair.  If the ADDR[4:0] pins are not strapped to zero, the switch
 * will only respond to SMI transactions to that specific address, and
 * an indirect addressing mechanism needs to be used to access its
 * registers.
 */
static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
{
	int ret;
	int i;

	for (i = 0; i < 16; i++) {
		ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
		if (ret < 0)
			return ret;

		if ((ret & SMI_CMD_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
{
	int ret;

	if (sw_addr == 0)
		return mv88e6xxx_mdiobus_read(bus, addr, reg);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the read command. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
				      SMI_CMD_OP_22_READ | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the read command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Read the data. */
	ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
	if (ret < 0)
		return ret;

	return ret & 0xffff;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
	int ret;

	if (bus == NULL)
		return -EINVAL;

	ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
	if (ret < 0)
		return ret;

	dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, ret);

	return ret;
}

int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_read(ds, addr, reg);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
			  int reg, u16 val)
{
	int ret;

	if (sw_addr == 0)
		return mv88e6xxx_mdiobus_write(bus, addr, reg, val);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the data to write. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
	if (ret < 0)
		return ret;

	/* Transmit the write command. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
				      SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the write command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	return 0;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
				u16 val)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);

	if (bus == NULL)
		return -EINVAL;

	dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, val);

	return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
}

int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

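/* Set the switch's own MAC address by writing it directly into the
 * three global MAC address registers, two bytes per register.
 */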
int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);

	return 0;
}

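/* Set the switch's own MAC address one byte at a time through the
 * indirect switch MAC register in the second global register block,
 * waiting for the busy flag to clear after each byte.
 */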
int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
{
	int i;
	int ret;

	for (i = 0; i < 6; i++) {
		int j;

		/* Write the MAC address byte. */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
			  GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);

		/* Wait for the write to complete. */
		for (j = 0; j < 16; j++) {
			ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
			if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
				break;
		}
		if (j == 16)
			return -ETIMEDOUT;
	}

	return 0;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_read(ds, addr, regnum);
	return 0xffff;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
				u16 val)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_write(ds, addr, regnum, val);
	return 0;
}

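/* When the PHY Polling Unit is enabled it owns access to the internal
 * PHYs, so it must be disabled before their registers can be read or
 * written directly. Since disabling and re-enabling the PPU is slow,
 * re-enabling is deferred: a timer is re-armed on every access and
 * kicks a work item that turns the PPU back on roughly 10ms after the
 * last access.
 */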
#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
		  ret & ~GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) !=
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) ==
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
{
	struct mv88e6xxx_priv_state *ps;

	ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
	if (mutex_trylock(&ps->ppu_mutex)) {
		struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;

		if (mv88e6xxx_ppu_enable(ds) == 0)
			ps->ppu_disabled = 0;
		mutex_unlock(&ps->ppu_mutex);
	}
}

static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
{
	struct mv88e6xxx_priv_state *ps = (void *)_ps;

	schedule_work(&ps->ppu_work);
}

static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->ppu_mutex);

	/* If the PHY polling unit is enabled, disable it so that
	 * we can access the PHY registers.  If it was already
	 * disabled, cancel the timer that is going to re-enable
	 * it.
	 */
	if (!ps->ppu_disabled) {
		ret = mv88e6xxx_ppu_disable(ds);
		if (ret < 0) {
			mutex_unlock(&ps->ppu_mutex);
			return ret;
		}
		ps->ppu_disabled = 1;
	} else {
		del_timer(&ps->ppu_timer);
		ret = 0;
	}

	return ret;
}

static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	/* Schedule a timer to re-enable the PHY polling unit. */
	mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
	mutex_unlock(&ps->ppu_mutex);
}

void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	mutex_init(&ps->ppu_mutex);
	INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
	init_timer(&ps->ppu_timer);
	ps->ppu_timer.data = (unsigned long)ps;
	ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}

int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_read(ds, addr, regnum);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}

int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
			    int regnum, u16 val)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}
#endif

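/* Poll the link state of every port and update the carrier state of
 * the corresponding slave network device, logging speed, duplex and
 * flow control whenever a link comes up.
 */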
void mv88e6xxx_poll_link(struct dsa_switch *ds)
{
	int i;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		struct net_device *dev;
		int uninitialized_var(port_status);
		int link;
		int speed;
		int duplex;
		int fc;

		dev = ds->ports[i];
		if (dev == NULL)
			continue;

		link = 0;
		if (dev->flags & IFF_UP) {
			port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
							 PORT_STATUS);
			if (port_status < 0)
				continue;

			link = !!(port_status & PORT_STATUS_LINK);
		}

		if (!link) {
			if (netif_carrier_ok(dev)) {
				netdev_info(dev, "link down\n");
				netif_carrier_off(dev);
			}
			continue;
		}

		switch (port_status & PORT_STATUS_SPEED_MASK) {
		case PORT_STATUS_SPEED_10:
			speed = 10;
			break;
		case PORT_STATUS_SPEED_100:
			speed = 100;
			break;
		case PORT_STATUS_SPEED_1000:
			speed = 1000;
			break;
		default:
			speed = -1;
			break;
		}
		duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
		fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;

		if (!netif_carrier_ok(dev)) {
			netdev_info(dev,
				    "link up, %d Mb/s, %s duplex, flow control %sabled\n",
				    speed,
				    duplex ? "full" : "half",
				    fc ? "en" : "dis");
			netif_carrier_on(dev);
		}
	}
}

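/* The supported switch models fall into families that share a register
 * layout and feature set. These helpers test the switch ID stored in
 * the private state so that the rest of the driver can key off the
 * family rather than individual device IDs.
 */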
static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6031:
	case PORT_SWITCH_ID_6061:
	case PORT_SWITCH_ID_6035:
	case PORT_SWITCH_ID_6065:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6092:
	case PORT_SWITCH_ID_6095:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6046:
	case PORT_SWITCH_ID_6085:
	case PORT_SWITCH_ID_6096:
	case PORT_SWITCH_ID_6097:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6123:
	case PORT_SWITCH_ID_6161:
	case PORT_SWITCH_ID_6165:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6121:
	case PORT_SWITCH_ID_6122:
	case PORT_SWITCH_ID_6152:
	case PORT_SWITCH_ID_6155:
	case PORT_SWITCH_ID_6182:
	case PORT_SWITCH_ID_6185:
	case PORT_SWITCH_ID_6108:
	case PORT_SWITCH_ID_6131:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6320:
	case PORT_SWITCH_ID_6321:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6171:
	case PORT_SWITCH_ID_6175:
	case PORT_SWITCH_ID_6350:
	case PORT_SWITCH_ID_6351:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6172:
	case PORT_SWITCH_ID_6176:
	case PORT_SWITCH_ID_6240:
	case PORT_SWITCH_ID_6352:
		return true;
	}
	return false;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
{
	int ret;
	int i;

	for (i = 0; i < 10; i++) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
		if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
	int ret;

	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
		port = (port + 1) << 5;

	/* Snapshot the hardware statistics counters for this port. */
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_CAPTURE_PORT |
				   GLOBAL_STATS_OP_HIST_RX_TX | port);
	if (ret < 0)
		return ret;

	/* Wait for the snapshotting to complete. */
	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return ret;

	return 0;
}

/* Must be called with SMI mutex held */
static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
{
	u32 _val;
	int ret;

	*val = 0;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_READ_CAPTURED |
				   GLOBAL_STATS_OP_HIST_RX_TX | stat);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
	if (ret < 0)
		return;

	_val = ret << 16;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
	if (ret < 0)
		return;

	*val = _val | ret;
}

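/* Statistics table: each entry gives the counter's name, its width in
 * bytes and its register offset. Offsets below 0x100 are read from the
 * snapshot taken by the global statistics unit; offsets of 0x100 and
 * above are read directly from the per-port registers (offset - 0x100).
 */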
static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
	{ "in_good_octets", 8, 0x00, },
	{ "in_bad_octets", 4, 0x02, },
	{ "in_unicast", 4, 0x04, },
	{ "in_broadcasts", 4, 0x06, },
	{ "in_multicasts", 4, 0x07, },
	{ "in_pause", 4, 0x16, },
	{ "in_undersize", 4, 0x18, },
	{ "in_fragments", 4, 0x19, },
	{ "in_oversize", 4, 0x1a, },
	{ "in_jabber", 4, 0x1b, },
	{ "in_rx_error", 4, 0x1c, },
	{ "in_fcs_error", 4, 0x1d, },
	{ "out_octets", 8, 0x0e, },
	{ "out_unicast", 4, 0x10, },
	{ "out_broadcasts", 4, 0x13, },
	{ "out_multicasts", 4, 0x12, },
	{ "out_pause", 4, 0x15, },
	{ "excessive", 4, 0x11, },
	{ "collisions", 4, 0x1e, },
	{ "deferred", 4, 0x05, },
	{ "single", 4, 0x14, },
	{ "multiple", 4, 0x17, },
	{ "out_fcs_error", 4, 0x03, },
	{ "late", 4, 0x1f, },
	{ "hist_64bytes", 4, 0x08, },
	{ "hist_65_127bytes", 4, 0x09, },
	{ "hist_128_255bytes", 4, 0x0a, },
	{ "hist_256_511bytes", 4, 0x0b, },
	{ "hist_512_1023bytes", 4, 0x0c, },
	{ "hist_1024_max_bytes", 4, 0x0d, },
	/* Not all devices have the following counters */
	{ "sw_in_discards", 4, 0x110, },
	{ "sw_in_filtered", 2, 0x112, },
	{ "sw_out_filtered", 2, 0x113, },

};

static bool have_sw_in_discards(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
	case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
	case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
	case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
	case PORT_SWITCH_ID_6352:
		return true;
	default:
		return false;
	}
}

static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
				   int nr_stats,
				   struct mv88e6xxx_hw_stat *stats,
				   int port, uint8_t *data)
{
	int i;

	for (i = 0; i < nr_stats; i++) {
		memcpy(data + i * ETH_GSTRING_LEN,
		       stats[i].string, ETH_GSTRING_LEN);
	}
}

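/* Read a single counter for a port, either from the snapshot held by
 * the global statistics unit or, for the "sw_*" counters, directly
 * from the port registers. Must be called with the SMI mutex held,
 * after a statistics snapshot has been taken for the port.
 */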
static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
					    int stat,
					    struct mv88e6xxx_hw_stat *stats,
					    int port)
{
	struct mv88e6xxx_hw_stat *s = stats + stat;
	u32 low;
	u32 high = 0;
	int ret;
	u64 value;

	if (s->reg >= 0x100) {
		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
					  s->reg - 0x100);
		if (ret < 0)
			return UINT64_MAX;

		low = ret;
		if (s->sizeof_stat == 4) {
			ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
						  s->reg - 0x100 + 1);
			if (ret < 0)
				return UINT64_MAX;
			high = ret;
		}
	} else {
		_mv88e6xxx_stats_read(ds, s->reg, &low);
		if (s->sizeof_stat == 8)
			_mv88e6xxx_stats_read(ds, s->reg + 1, &high);
	}
	value = (((u64)high) << 16) | low;
	return value;
}

static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
					 int nr_stats,
					 struct mv88e6xxx_hw_stat *stats,
					 int port, uint64_t *data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_stats_snapshot(ds, port);
	if (ret < 0) {
		mutex_unlock(&ps->smi_mutex);
		return;
	}

	/* Read each of the counters. */
	for (i = 0; i < nr_stats; i++)
		data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);

	mutex_unlock(&ps->smi_mutex);
}

/* All the statistics in the table */
void
mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
				       mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
				       mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
	if (have_sw_in_discards(ds))
		return ARRAY_SIZE(mv88e6xxx_hw_stats);
	return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
}

void
mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
			    int port, uint64_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
			mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
			mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
	return 32 * sizeof(u16);
}

void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
			struct ethtool_regs *regs, void *_p)
{
	u16 *p = _p;
	int i;

	regs->version = 0;

	memset(p, 0xff, 32 * sizeof(u16));

	for (i = 0; i < 32; i++) {
		int ret;

		ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
		if (ret >= 0)
			p[i] = ret;
	}
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
			   u16 mask)
{
	unsigned long timeout = jiffies + HZ / 10;

	while (time_before(jiffies, timeout)) {
		int ret;

		ret = _mv88e6xxx_reg_read(ds, reg, offset);
		if (ret < 0)
			return ret;
		if (!(ret & mask))
			return 0;

		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}

static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_wait(ds, reg, offset, mask);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
			       GLOBAL2_SMI_OP_BUSY);
}

int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_LOAD);
}

int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_BUSY);
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
			       GLOBAL_ATU_OP_BUSY);
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
			       GLOBAL2_SCRATCH_BUSY);
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
					int regnum)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_READ | (addr << 5) |
				   regnum);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_phy_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
					 int regnum, u16 val)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
				   regnum);

	return _mv88e6xxx_phy_wait(ds);
}

int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (reg < 0)
		goto out;

	e->eee_enabled = !!(reg & 0x0200);
	e->tx_lpi_enabled = !!(reg & 0x0100);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
	if (reg < 0)
		goto out;

	e->eee_active = !!(reg & PORT_STATUS_EEE);
	reg = 0;

out:
	mutex_unlock(&ps->smi_mutex);
	return reg;
}

int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
		      struct phy_device *phydev, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (ret < 0)
		goto out;

	reg = ret & ~0x0300;
	if (e->eee_enabled)
		reg |= 0x0200;
	if (e->tx_lpi_enabled)
		reg |= 0x0100;

	ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
out:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

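/* Start an ATU operation on the given FID and wait for it to complete.
 * Must be called with the SMI mutex held.
 */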
static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_wait(ds);
}

static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
{
	int ret;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB);
}

static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg, ret = 0;
	u8 oldstate;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
	if (reg < 0) {
		ret = reg;
		goto abort;
	}

	oldstate = reg & PORT_CONTROL_STATE_MASK;
	if (oldstate != state) {
		/* Flush forwarding database if we're moving a port
		 * from Learning or Forwarding state to Disabled or
		 * Blocking or Listening state.
		 */
		if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
		    state <= PORT_CONTROL_STATE_BLOCKING) {
			ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
			if (ret)
				goto abort;
		}
		reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
					   reg);
	}

abort:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

/* Must be called with smi lock held */
static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid = ps->fid[port];
	u16 reg = fid << 12;

	if (dsa_is_cpu_port(ds, port))
		reg |= ds->phys_port_mask;
	else
		reg |= (ps->bridge_mask[fid] |
		       (1 << dsa_upstream_port(ds))) & ~(1 << port);

	return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
}

/* Must be called with smi lock held */
static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int port;
	u32 mask;
	int ret;

	mask = ds->phys_port_mask;
	while (mask) {
		port = __ffs(mask);
		mask &= ~(1 << port);
		if (ps->fid[port] != fid)
			continue;

		ret = _mv88e6xxx_update_port_config(ds, port);
		if (ret)
			return ret;
	}

	return _mv88e6xxx_flush_fid(ds, fid);
}

/* Bridge handling functions */

int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret = 0;
	u32 nmask;
	int fid;

	/* If the bridge group is not empty, join that group.
	 * Otherwise create a new group.
	 */
	fid = ps->fid[port];
	nmask = br_port_mask & ~(1 << port);
	if (nmask)
		fid = ps->fid[__ffs(nmask)];

	nmask = ps->bridge_mask[fid] | (1 << port);
	if (nmask != br_port_mask) {
		netdev_err(ds->ports[port],
			   "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
			   fid, br_port_mask, nmask);
		return -EINVAL;
	}

	mutex_lock(&ps->smi_mutex);

	ps->bridge_mask[fid] = br_port_mask;

	if (fid != ps->fid[port]) {
		clear_bit(ps->fid[port], ps->fid_bitmap);
		ps->fid[port] = fid;
		ret = _mv88e6xxx_update_bridge_config(ds, fid);
	}

	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid, newfid;
	int ret;

	fid = ps->fid[port];

	if (ps->bridge_mask[fid] != br_port_mask) {
		netdev_err(ds->ports[port],
			   "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
			   fid, br_port_mask, ps->bridge_mask[fid]);
		return -EINVAL;
	}

	/* If the port was the last port of a bridge, we are done.
	 * Otherwise assign a new fid to the port, and fix up
	 * the bridge configuration.
	 */
	if (br_port_mask == (1 << port))
		return 0;

	mutex_lock(&ps->smi_mutex);

	newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
	if (unlikely(newfid > ps->num_ports)) {
		netdev_err(ds->ports[port], "all first %d FIDs are used\n",
			   ps->num_ports);
		ret = -ENOSPC;
		goto unlock;
	}

	ps->fid[port] = newfid;
	set_bit(newfid, ps->fid_bitmap);
	ps->bridge_mask[fid] &= ~(1 << port);
	ps->bridge_mask[newfid] = 1 << port;

	ret = _mv88e6xxx_update_bridge_config(ds, fid);
	if (!ret)
		ret = _mv88e6xxx_update_bridge_config(ds, newfid);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = PORT_CONTROL_STATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = PORT_CONTROL_STATE_BLOCKING;
		break;
	case BR_STATE_LEARNING:
		stp_state = PORT_CONTROL_STATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = PORT_CONTROL_STATE_FORWARDING;
		break;
	}

	netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);

	/* mv88e6xxx_port_stp_update may be called with softirqs disabled,
	 * so we can not update the port state directly but need to schedule it.
	 */
	ps->port_state[port] = stp_state;
	set_bit(port, &ps->port_state_update_mask);
	schedule_work(&ps->bridge_work);

	return 0;
}

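/* Helpers to load a MAC address into, or read it back from, the three
 * 16-bit ATU MAC registers. Must be called with the SMI mutex held.
 */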
static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
				    const unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_write(
			ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
			(addr[i * 2] << 8) | addr[i * 2 + 1]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
{
	int i, ret;

	for (i = 0; i < 3; i++) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
					  GLOBAL_ATU_MAC_01 + i);
		if (ret < 0)
			return ret;
		addr[i * 2] = ret >> 8;
		addr[i * 2 + 1] = ret & 0xff;
	}

	return 0;
}

static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
			       struct mv88e6xxx_atu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
	if (ret < 0)
		return ret;

	if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (entry->trunk) {
			reg |= GLOBAL_ATU_DATA_TRUNK;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		reg |= (entry->portv_trunkid << shift) & mask;
	}

	reg |= entry->state & GLOBAL_ATU_DATA_STATE_MASK;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
}

static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	if (vid == 0)
		return ps->fid[port];

	return -ENOENT;
}

static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
				    const unsigned char *addr, u16 vid,
				    u8 state)
{
	struct mv88e6xxx_atu_entry entry = { 0 };
	int ret;

	ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
	if (ret < 0)
		return ret;

	entry.fid = ret;
	entry.state = state;
	ether_addr_copy(entry.mac, addr);
	if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		entry.trunk = false;
		entry.portv_trunkid = BIT(port);
	}

	return _mv88e6xxx_atu_load(ds, &entry);
}

int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	int state = is_multicast_ether_addr(addr) ?
		GLOBAL_ATU_DATA_STATE_MC_STATIC :
		GLOBAL_ATU_DATA_STATE_UC_STATIC;
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
			   const unsigned char *addr, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
				       GLOBAL_ATU_DATA_STATE_UNUSED);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

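/* Issue an ATU get-next operation starting from the given MAC address
 * within a FID, and decode the returned entry (state, trunk flag and
 * port vector or trunk ID). Must be called with the SMI mutex held.
 */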
static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
				  const unsigned char *addr,
				  struct mv88e6xxx_atu_entry *entry)
{
	struct mv88e6xxx_atu_entry next = { 0 };
	int ret;

	next.fid = fid;

	ret = _mv88e6xxx_atu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_write(ds, addr);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
	if (ret < 0)
		return ret;

	next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
	if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (ret & GLOBAL_ATU_DATA_TRUNK) {
			next.trunk = true;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			next.trunk = false;
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		next.portv_trunkid = (ret & mask) >> shift;
	}

	*entry = next;
	return 0;
}

/* get next entry for port */
int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
			       unsigned char *addr, u16 *vid, bool *is_static)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_atu_entry next;
	u16 fid;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
	if (ret < 0)
		goto unlock;
	fid = ret;

	do {
		if (is_broadcast_ether_addr(addr)) {
			ret = -ENOENT;
			goto unlock;
		}

		ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
		if (ret < 0)
			goto unlock;

		ether_addr_copy(addr, next.mac);

		if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
			continue;
	} while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);

	*is_static = next.state == (is_multicast_ether_addr(addr) ?
				    GLOBAL_ATU_DATA_STATE_MC_STATIC :
				    GLOBAL_ATU_DATA_STATE_UC_STATIC);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static void mv88e6xxx_bridge_work(struct work_struct *work)
{
	struct mv88e6xxx_priv_state *ps;
	struct dsa_switch *ds;
	int port;

	ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
	ds = ((struct dsa_switch *)ps) - 1;

	while (ps->port_state_update_mask) {
		port = __ffs(ps->port_state_update_mask);
		clear_bit(port, &ps->port_state_update_mask);
		mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
	}
}

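/* Apply the driver's default configuration to a single port: force the
 * link parameters of CPU and DSA ports, select the frame/tag mode, and
 * program the remaining per-port control registers according to the
 * device family.
 */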
static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret, fid;
	u16 reg;

	mutex_lock(&ps->smi_mutex);

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
	    mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
		/* MAC Forcing register: don't force link, speed,
		 * duplex or flow control state to any particular
		 * values on physical ports, but force the CPU port
		 * and all DSA ports to their maximum bandwidth and
		 * full duplex.
		 */
		reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
		if (dsa_is_cpu_port(ds, port) ||
		    ds->dsa_port_mask & (1 << port)) {
			reg |= PORT_PCS_CTRL_FORCE_LINK |
				PORT_PCS_CTRL_LINK_UP |
				PORT_PCS_CTRL_DUPLEX_FULL |
				PORT_PCS_CTRL_FORCE_DUPLEX;
			if (mv88e6xxx_6065_family(ds))
				reg |= PORT_PCS_CTRL_100;
			else
				reg |= PORT_PCS_CTRL_1000;
		} else {
			reg |= PORT_PCS_CTRL_UNFORCED;
		}

		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_PCS_CTRL, reg);
		if (ret)
			goto abort;
	}

	/* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
	 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
	 * tunneling, determine priority by looking at 802.1p and IP
	 * priority fields (IP prio has precedence), and set STP state
	 * to Forwarding.
	 *
	 * If this is the CPU link, use DSA or EDSA tagging depending
	 * on which tagging mode was configured.
	 *
	 * If this is a link to another switch, use DSA tagging mode.
	 *
	 * If this is the upstream port for this switch, enable
	 * forwarding of unknown unicasts and multicasts.
	 */
	reg = 0;
	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
	    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
		reg = PORT_CONTROL_IGMP_MLD_SNOOP |
		PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
		PORT_CONTROL_STATE_FORWARDING;
	if (dsa_is_cpu_port(ds, port)) {
		if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
			reg |= PORT_CONTROL_DSA_TAG;
		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
		    mv88e6xxx_6320_family(ds)) {
			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
				reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
			else
				reg |= PORT_CONTROL_FRAME_MODE_DSA;
		}

		if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
		    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
		    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
		    mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
			if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
				reg |= PORT_CONTROL_EGRESS_ADD_TAG;
		}
	}
	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		if (ds->dsa_port_mask & (1 << port))
			reg |= PORT_CONTROL_FRAME_MODE_DSA;
		if (port == dsa_upstream_port(ds))
			reg |= PORT_CONTROL_FORWARD_UNKNOWN |
				PORT_CONTROL_FORWARD_UNKNOWN_MC;
	}
	if (reg) {
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_CONTROL, reg);
		if (ret)
			goto abort;
	}

	/* Port Control 2: don't force a good FCS, set the maximum
	 * frame size to 10240 bytes, don't let the switch add or
	 * strip 802.1q tags, don't discard tagged or untagged frames
	 * on this port, do a destination address lookup on all
	 * received packets as usual, disable ARP mirroring and don't
	 * send a copy of all transmitted/received frames on this port
	 * to the CPU.
	 */
	reg = 0;
	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
		reg = PORT_CONTROL_2_MAP_DA;

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
		reg |= PORT_CONTROL_2_JUMBO_10240;

	if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
		/* Set the upstream port this port should use */
		reg |= dsa_upstream_port(ds);
		/* enable forwarding of unknown multicast addresses to
		 * the upstream port
		 */
		if (port == dsa_upstream_port(ds))
			reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
	}

	if (reg) {
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_CONTROL_2, reg);
		if (ret)
			goto abort;
	}

	/* Port Association Vector: when learning source addresses
	 * of packets, add the address to the address database using
	 * a port bitmap that has only the bit for this port set and
	 * the other bits clear.
	 */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
				   1 << port);
	if (ret)
		goto abort;

	/* Egress rate control 2: disable egress rate control. */
	ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
				   0x0000);
	if (ret)
		goto abort;

	if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
	    mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
	    mv88e6xxx_6320_family(ds)) {
		/* Do not limit the period of time that this port can
		 * be paused for by the remote end or the period of
		 * time that this port can pause the remote end.
		 */
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
					   PORT_PAUSE_CTRL, 0x0000);
		if (ret)
			goto abort;

1585 /* Port ATU control: disable limiting the number of
1586 * address database entries that this port is allowed
1587 * to use.
1588 */
1589 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1590 PORT_ATU_CONTROL, 0x0000);
 if (ret)
 goto abort;

1591 /* Priority Override: disable DA, SA and VTU priority
1592 * override.
1593 */
1594 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1595 PORT_PRI_OVERRIDE, 0x0000);
1596 if (ret)
1597 goto abort;
1598
1599 /* Port Ethertype: set the Ethertype used for Ethertype DSA
1600 * (EDSA) tagged frames.
1601 */
1602 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1603 PORT_ETH_TYPE, ETH_P_EDSA);
1604 if (ret)
1605 goto abort;
1606 /* Tag Remap: use an identity 802.1p prio -> switch
1607 * prio mapping.
1608 */
1609 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1610 PORT_TAG_REGMAP_0123, 0x3210);
1611 if (ret)
1612 goto abort;
1613
1614 /* Tag Remap 2: use an identity 802.1p prio -> switch
1615 * prio mapping.
1616 */
1617 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1618 PORT_TAG_REGMAP_4567, 0x7654);
1619 if (ret)
1620 goto abort;
1621 }
1622
1623 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1624 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07001625 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
1626 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02001627 /* Rate Control: disable ingress rate limiting. */
1628 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
1629 PORT_RATE_CONTROL, 0x0001);
1630 if (ret)
1631 goto abort;
1632 }
1633
Guenter Roeck366f0a02015-03-26 18:36:30 -07001634 /* Port Control 1: disable trunking, disable sending
1635 * learning messages to this port.
Guenter Roeckd827e882015-03-26 18:36:29 -07001636 */
Vivien Didelot614f03f2015-04-20 17:19:23 -04001637 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
Guenter Roeckd827e882015-03-26 18:36:29 -07001638 if (ret)
1639 goto abort;
1640
1641 /* Port based VLAN map: give each port its own address
1642 * database, allow the CPU port to talk to each of the 'real'
1643 * ports, and allow each of the 'real' ports to only talk to
1644 * the upstream port.
1645 */
Vivien Didelot194fea72015-08-10 09:09:47 -04001646 fid = port + 1;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001647 ps->fid[port] = fid;
Vivien Didelot194fea72015-08-10 09:09:47 -04001648 set_bit(fid, ps->fid_bitmap);
Guenter Roeckd827e882015-03-26 18:36:29 -07001649
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001650 if (!dsa_is_cpu_port(ds, port))
1651 ps->bridge_mask[fid] = 1 << port;
1652
1653 ret = _mv88e6xxx_update_port_config(ds, port);
Guenter Roeckd827e882015-03-26 18:36:29 -07001654 if (ret)
1655 goto abort;
1656
1657 /* Default VLAN ID and priority: don't set a default VLAN
1658 * ID, and set the default packet priority to zero.
1659 */
Vivien Didelot47cf1e652015-04-20 17:43:26 -04001660 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
1661 0x0000);
Guenter Roeckd827e882015-03-26 18:36:29 -07001662abort:
1663 mutex_unlock(&ps->smi_mutex);
1664 return ret;
1665}
1666
Andrew Lunndbde9e62015-05-06 01:09:48 +02001667int mv88e6xxx_setup_ports(struct dsa_switch *ds)
1668{
1669 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1670 int ret;
1671 int i;
1672
1673 for (i = 0; i < ps->num_ports; i++) {
1674 ret = mv88e6xxx_setup_port(ds, i);
1675 if (ret < 0)
1676 return ret;
1677 }
1678 return 0;
1679}
1680
Andrew Lunn87c8cef2015-06-20 18:42:28 +02001681static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
1682{
1683 struct dsa_switch *ds = s->private;
1684
1685 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1686 int reg, port;
1687
1688 seq_puts(s, " GLOBAL GLOBAL2 ");
1689 for (port = 0; port < ps->num_ports; port++)
1690 seq_printf(s, " %2d ", port);
1691 seq_puts(s, "\n");
1692
1693 for (reg = 0; reg < 32; reg++) {
1694 seq_printf(s, "%2x: ", reg);
1695 seq_printf(s, " %4x %4x ",
1696 mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
1697 mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));
1698
1699 for (port = 0; port < ps->num_ports; port++)
1700 seq_printf(s, "%4x ",
1701 mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
1702 seq_puts(s, "\n");
1703 }
1704
1705 return 0;
1706}
1707
1708static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
1709{
1710 return single_open(file, mv88e6xxx_regs_show, inode->i_private);
1711}
1712
1713static const struct file_operations mv88e6xxx_regs_fops = {
1714 .open = mv88e6xxx_regs_open,
1715 .read = seq_read,
1716 .llseek = no_llseek,
1717 .release = single_release,
1718 .owner = THIS_MODULE,
1719};
1720
Andrew Lunn8a0a2652015-06-20 18:42:29 +02001721static void mv88e6xxx_atu_show_header(struct seq_file *s)
1722{
1723 seq_puts(s, "DB T/P Vec State Addr\n");
1724}
1725
1726static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
1727 unsigned char *addr, int data)
1728{
1729 bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
1730 int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
1731 GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
1732 int state = data & GLOBAL_ATU_DATA_STATE_MASK;
1733
1734 seq_printf(s, "%03x %5s %10pb %x %pM\n",
1735 dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
1736}
1737
1738static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
1739 int dbnum)
1740{
1741 unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
1742 unsigned char addr[6];
1743 int ret, data, state;
1744
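 /* Loading the broadcast address first makes the following
 * GetNext operations wrap around and walk the database from
 * its lowest MAC address.
 */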
Vivien Didelotc5723ac2015-08-10 09:09:48 -04001745 ret = _mv88e6xxx_atu_mac_write(ds, bcast);
Andrew Lunn8a0a2652015-06-20 18:42:29 +02001746 if (ret < 0)
1747 return ret;
1748
1749 do {
1750 ret = _mv88e6xxx_atu_cmd(ds, dbnum, GLOBAL_ATU_OP_GET_NEXT_DB);
1751 if (ret < 0)
1752 return ret;
1753 data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
1754 if (data < 0)
1755 return data;
1756
1757 state = data & GLOBAL_ATU_DATA_STATE_MASK;
1758 if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
1759 break;
Vivien Didelotc5723ac2015-08-10 09:09:48 -04001760 ret = _mv88e6xxx_atu_mac_read(ds, addr);
Andrew Lunn8a0a2652015-06-20 18:42:29 +02001761 if (ret < 0)
1762 return ret;
1763 mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
1764 } while (state != GLOBAL_ATU_DATA_STATE_UNUSED);
1765
1766 return 0;
1767}
1768
1769static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
1770{
1771 struct dsa_switch *ds = s->private;
1772 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1773 int dbnum;
1774
1775 mv88e6xxx_atu_show_header(s);
1776
1777 for (dbnum = 0; dbnum < 255; dbnum++) {
1778 mutex_lock(&ps->smi_mutex);
1779 mv88e6xxx_atu_show_db(s, ds, dbnum);
1780 mutex_unlock(&ps->smi_mutex);
1781 }
1782
1783 return 0;
1784}
1785
1786static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
1787{
1788 return single_open(file, mv88e6xxx_atu_show, inode->i_private);
1789}
1790
1791static const struct file_operations mv88e6xxx_atu_fops = {
1792 .open = mv88e6xxx_atu_open,
1793 .read = seq_read,
1794 .llseek = no_llseek,
1795 .release = single_release,
1796 .owner = THIS_MODULE,
1797};
1798
Andrew Lunn532c7a32015-06-20 18:42:31 +02001799static void mv88e6xxx_stats_show_header(struct seq_file *s,
1800 struct mv88e6xxx_priv_state *ps)
1801{
1802 int port;
1803
1804 seq_puts(s, " Statistic ");
1805 for (port = 0; port < ps->num_ports; port++)
1806 seq_printf(s, "Port %2d ", port);
1807 seq_puts(s, "\n");
1808}
1809
1810static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
1811{
1812 struct dsa_switch *ds = s->private;
1813 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1814 struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
1815 int port, stat, max_stats;
1816 uint64_t value;
1817
1818 if (have_sw_in_discards(ds))
1819 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
1820 else
1821 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
1822
1823 mv88e6xxx_stats_show_header(s, ps);
1824
1825 mutex_lock(&ps->smi_mutex);
1826
1827 for (stat = 0; stat < max_stats; stat++) {
1828 seq_printf(s, "%19s: ", stats[stat].string);
1829 for (port = 0; port < ps->num_ports; port++) {
1830 _mv88e6xxx_stats_snapshot(ds, port);
1831 value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
1832 port);
1833 seq_printf(s, "%8llu ", value);
1834 }
1835 seq_puts(s, "\n");
1836 }
1837 mutex_unlock(&ps->smi_mutex);
1838
1839 return 0;
1840}
1841
1842static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
1843{
1844 return single_open(file, mv88e6xxx_stats_show, inode->i_private);
1845}
1846
1847static const struct file_operations mv88e6xxx_stats_fops = {
1848 .open = mv88e6xxx_stats_open,
1849 .read = seq_read,
1850 .llseek = no_llseek,
1851 .release = single_release,
1852 .owner = THIS_MODULE,
1853};
1854
Andrew Lunnd35bd872015-06-20 18:42:32 +02001855static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
1856{
1857 struct dsa_switch *ds = s->private;
1858 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1859 int target, ret;
1860
1861 seq_puts(s, "Target Port\n");
1862
1863 mutex_lock(&ps->smi_mutex);
1864 for (target = 0; target < 32; target++) {
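 /* A write with the UPDATE bit clear only selects which table
 * entry the following read returns; it does not modify the
 * routing table.
 */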
1865 ret = _mv88e6xxx_reg_write(
1866 ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
1867 target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
1868 if (ret < 0)
1869 goto out;
1870 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
1871 GLOBAL2_DEVICE_MAPPING);
1872 seq_printf(s, " %2d %2d\n", target,
1873 ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
1874 }
1875out:
1876 mutex_unlock(&ps->smi_mutex);
1877
1878 return 0;
1879}
1880
1881static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
1882{
1883 return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
1884}
1885
1886static const struct file_operations mv88e6xxx_device_map_fops = {
1887 .open = mv88e6xxx_device_map_open,
1888 .read = seq_read,
1889 .llseek = no_llseek,
1890 .release = single_release,
1891 .owner = THIS_MODULE,
1892};
1893
Andrew Lunn56d95e22015-06-20 18:42:33 +02001894static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
1895{
1896 struct dsa_switch *ds = s->private;
1897 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1898 int reg, ret;
1899
1900 seq_puts(s, "Register Value\n");
1901
1902 mutex_lock(&ps->smi_mutex);
1903 for (reg = 0; reg < 0x80; reg++) {
1904 ret = _mv88e6xxx_reg_write(
1905 ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
1906 reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
1907 if (ret < 0)
1908 goto out;
1909
1910 ret = _mv88e6xxx_scratch_wait(ds);
1911 if (ret < 0)
1912 goto out;
1913
1914 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
1915 GLOBAL2_SCRATCH_MISC);
1916 seq_printf(s, " %2x %2x\n", reg,
1917 ret & GLOBAL2_SCRATCH_VALUE_MASK);
1918 }
1919out:
1920 mutex_unlock(&ps->smi_mutex);
1921
1922 return 0;
1923}
1924
1925static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
1926{
1927 return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
1928}
1929
1930static const struct file_operations mv88e6xxx_scratch_fops = {
1931 .open = mv88e6xxx_scratch_open,
1932 .read = seq_read,
1933 .llseek = no_llseek,
1934 .release = single_release,
1935 .owner = THIS_MODULE,
1936};
1937
Guenter Roeckacdaffc2015-03-26 18:36:28 -07001938int mv88e6xxx_setup_common(struct dsa_switch *ds)
1939{
1940 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Andrew Lunn87c8cef2015-06-20 18:42:28 +02001941 char *name;
Guenter Roeckacdaffc2015-03-26 18:36:28 -07001942
1943 mutex_init(&ps->smi_mutex);
Guenter Roeckacdaffc2015-03-26 18:36:28 -07001944
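 /* Cache the product number; the low four bits of
 * PORT_SWITCH_ID hold the revision and are masked off.
 */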
Andrew Lunncca8b132015-04-02 04:06:39 +02001945 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
Andrew Lunna8f064c2015-03-26 18:36:40 -07001946
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001947 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
1948
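 /* Create a per-switch debugfs directory, e.g. /sys/kernel/debug/dsa0 */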
Andrew Lunn87c8cef2015-06-20 18:42:28 +02001949 name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
1950 ps->dbgfs = debugfs_create_dir(name, NULL);
1951 kfree(name);
1952
1953 debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
1954 &mv88e6xxx_regs_fops);
1955
Andrew Lunn8a0a2652015-06-20 18:42:29 +02001956 debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
1957 &mv88e6xxx_atu_fops);
1958
Andrew Lunn532c7a32015-06-20 18:42:31 +02001959 debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
1960 &mv88e6xxx_stats_fops);
1961
Andrew Lunnd35bd872015-06-20 18:42:32 +02001962 debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
1963 &mv88e6xxx_device_map_fops);
Andrew Lunn56d95e22015-06-20 18:42:33 +02001964
1965 debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
1966 &mv88e6xxx_scratch_fops);
Guenter Roeckacdaffc2015-03-26 18:36:28 -07001967 return 0;
1968}
1969
Andrew Lunn54d792f2015-05-06 01:09:47 +02001970int mv88e6xxx_setup_global(struct dsa_switch *ds)
1971{
1972 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot24751e22015-08-03 09:17:44 -04001973 int ret;
Andrew Lunn54d792f2015-05-06 01:09:47 +02001974 int i;
1975
1976 /* Set the default address aging time to 5 minutes, and
1977 * enable address learn messages to be sent to all message
1978 * ports.
1979 */
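 /* 0x0140 sets the AgeTime field to 20 units of 15 seconds (5 minutes). */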
1980 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
1981 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
1982
1983 /* Configure the IP ToS mapping registers. */
1984 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
1985 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
1986 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
1987 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
1988 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
1989 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
1990 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
1991 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
1992
1993 /* Configure the IEEE 802.1p priority mapping register. */
1994 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
1995
1996 /* Send all frames with destination addresses matching
1997 * 01:80:c2:00:00:0x to the CPU port.
1998 */
1999 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2000
2001 /* Ignore removed tag data on doubly tagged packets, disable
2002 * flow control messages, force flow control priority to the
2003 * highest, and send all special multicast frames to the CPU
2004 * port at the highest priority.
2005 */
2006 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
2007 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
2008 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
2009
2010 /* Program the DSA routing table. */
2011 for (i = 0; i < 32; i++) {
2012 int nexthop = 0x1f;
2013
2014 if (ds->pd->rtable &&
2015 i != ds->index && i < ds->dst->pd->nr_chips)
2016 nexthop = ds->pd->rtable[i] & 0x1f;
2017
2018 REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2019 GLOBAL2_DEVICE_MAPPING_UPDATE |
2020 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
2021 nexthop);
2022 }
2023
2024 /* Clear all trunk masks. */
2025 for (i = 0; i < 8; i++)
2026 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
2027 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
2028 ((1 << ps->num_ports) - 1));
2029
2030 /* Clear all trunk mappings. */
2031 for (i = 0; i < 16; i++)
2032 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
2033 GLOBAL2_TRUNK_MAPPING_UPDATE |
2034 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2035
2036 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002037 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2038 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002039 /* Send all frames with destination addresses matching
2040 * 01:80:c2:00:00:2x to the CPU port.
2041 */
2042 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
2043
2044 /* Initialise cross-chip port VLAN table to reset
2045 * defaults.
2046 */
2047 REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
2048
2049 /* Clear the priority override table. */
2050 for (i = 0; i < 16; i++)
2051 REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
2052 0x8000 | (i << 8));
2053 }
2054
2055 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2056 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002057 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2058 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002059 /* Disable ingress rate limiting by resetting all
2060 * ingress rate limit registers to their initial
2061 * state.
2062 */
2063 for (i = 0; i < ps->num_ports; i++)
2064 REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
2065 0x9000 | (i << 8));
2066 }
2067
Andrew Lunndb687a52015-06-20 21:31:29 +02002068 /* Clear the statistics counters for all ports */
2069 REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
2070
2071 /* Wait for the flush to complete. */
Vivien Didelot24751e22015-08-03 09:17:44 -04002072 mutex_lock(&ps->smi_mutex);
2073 ret = _mv88e6xxx_stats_wait(ds);
2074 mutex_unlock(&ps->smi_mutex);
Andrew Lunndb687a52015-06-20 21:31:29 +02002075
Vivien Didelot24751e22015-08-03 09:17:44 -04002076 return ret;
Andrew Lunn54d792f2015-05-06 01:09:47 +02002077}
2078
Andrew Lunn143a8302015-04-02 04:06:34 +02002079int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
2080{
2081 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2082 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2083 unsigned long timeout;
2084 int ret;
2085 int i;
2086
2087 /* Set all ports to the disabled state. */
2088 for (i = 0; i < ps->num_ports; i++) {
Andrew Lunncca8b132015-04-02 04:06:39 +02002089 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
2090 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
Andrew Lunn143a8302015-04-02 04:06:34 +02002091 }
2092
2093 /* Wait for transmit queues to drain. */
2094 usleep_range(2000, 4000);
2095
2096 /* Reset the switch. Keep the PPU active if requested. The PPU
2097 * needs to be active to support indirect phy register access
2098 * through global registers 0x18 and 0x19.
2099 */
2100 if (ppu_active)
2101 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
2102 else
2103 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
2104
2105 /* Wait up to one second for reset to complete. */
2106 timeout = jiffies + 1 * HZ;
2107 while (time_before(jiffies, timeout)) {
2108 ret = REG_READ(REG_GLOBAL, 0x00);
2109 if ((ret & is_reset) == is_reset)
2110 break;
2111 usleep_range(1000, 2000);
2112 }
2113 if (time_after(jiffies, timeout))
2114 return -ETIMEDOUT;
2115
2116 return 0;
2117}
2118
Andrew Lunn491435852015-04-02 04:06:35 +02002119int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2120{
2121 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2122 int ret;
2123
Andrew Lunn3898c142015-05-06 01:09:53 +02002124 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002125 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
Andrew Lunn491435852015-04-02 04:06:35 +02002126 if (ret < 0)
2127 goto error;
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002128 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
Andrew Lunn491435852015-04-02 04:06:35 +02002129error:
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002130 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
Andrew Lunn3898c142015-05-06 01:09:53 +02002131 mutex_unlock(&ps->smi_mutex);
Andrew Lunn491435852015-04-02 04:06:35 +02002132 return ret;
2133}
2134
2135int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2136 int reg, int val)
2137{
2138 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2139 int ret;
2140
Andrew Lunn3898c142015-05-06 01:09:53 +02002141 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002142 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
Andrew Lunn491435852015-04-02 04:06:35 +02002143 if (ret < 0)
2144 goto error;
2145
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002146 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
Andrew Lunn491435852015-04-02 04:06:35 +02002147error:
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002148 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
Andrew Lunn3898c142015-05-06 01:09:53 +02002149 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002150 return ret;
2151}
2152
2153static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
2154{
2155 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2156
2157 if (port >= 0 && port < ps->num_ports)
2158 return port;
2159 return -EINVAL;
2160}
2161
2162int
2163mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
2164{
2165 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2166 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2167 int ret;
2168
2169 if (addr < 0)
2170 return addr;
2171
Andrew Lunn3898c142015-05-06 01:09:53 +02002172 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002173 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
Andrew Lunn3898c142015-05-06 01:09:53 +02002174 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002175 return ret;
2176}
2177
2178int
2179mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
2180{
2181 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2182 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2183 int ret;
2184
2185 if (addr < 0)
2186 return addr;
2187
Andrew Lunn3898c142015-05-06 01:09:53 +02002188 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002189 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
Andrew Lunn3898c142015-05-06 01:09:53 +02002190 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002191 return ret;
2192}
2193
2194int
2195mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
2196{
2197 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2198 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2199 int ret;
2200
2201 if (addr < 0)
2202 return addr;
2203
Andrew Lunn3898c142015-05-06 01:09:53 +02002204 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002205 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
Andrew Lunn3898c142015-05-06 01:09:53 +02002206 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002207 return ret;
2208}
2209
2210int
2211mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
2212 u16 val)
2213{
2214 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2215 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2216 int ret;
2217
2218 if (addr < 0)
2219 return addr;
2220
Andrew Lunn3898c142015-05-06 01:09:53 +02002221 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002222 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
Andrew Lunn3898c142015-05-06 01:09:53 +02002223 mutex_unlock(&ps->smi_mutex);
Andrew Lunn491435852015-04-02 04:06:35 +02002224 return ret;
2225}
2226
Guenter Roeckc22995c2015-07-25 09:42:28 -07002227#ifdef CONFIG_NET_DSA_HWMON
2228
2229static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
2230{
2231 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2232 int ret;
2233 int val;
2234
2235 *temp = 0;
2236
2237 mutex_lock(&ps->smi_mutex);
2238
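 /* Select PHY page 6, which holds the temperature sensor registers. */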
2239 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
2240 if (ret < 0)
2241 goto error;
2242
2243 /* Enable temperature sensor */
2244 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2245 if (ret < 0)
2246 goto error;
2247
2248 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
2249 if (ret < 0)
2250 goto error;
2251
2252 /* Wait for temperature to stabilize */
2253 usleep_range(10000, 12000);
2254
2255 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2256 if (val < 0) {
2257 ret = val;
2258 goto error;
2259 }
2260
2261 /* Disable temperature sensor */
2262 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, val & ~(1 << 5));
2263 if (ret < 0)
2264 goto error;
2265
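 /* The 5-bit reading is in 5 degree C steps, starting at -25 C. */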
2266 *temp = ((val & 0x1f) - 5) * 5;
2267
2268error:
2269 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
2270 mutex_unlock(&ps->smi_mutex);
2271 return ret;
2272}
2273
2274static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
2275{
2276 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2277 int ret;
2278
2279 *temp = 0;
2280
2281 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
2282 if (ret < 0)
2283 return ret;
2284
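 /* The sensor reports degrees C with a 25 degree offset. */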
2285 *temp = (ret & 0xff) - 25;
2286
2287 return 0;
2288}
2289
2290int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
2291{
2292 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
2293 return mv88e63xx_get_temp(ds, temp);
2294
2295 return mv88e61xx_get_temp(ds, temp);
2296}
2297
2298int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
2299{
2300 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2301 int ret;
2302
2303 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2304 return -EOPNOTSUPP;
2305
2306 *temp = 0;
2307
2308 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2309 if (ret < 0)
2310 return ret;
2311
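 /* Bits 12:8 hold the limit in 5 degree C steps, offset by -25 C. */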
2312 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
2313
2314 return 0;
2315}
2316
2317int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
2318{
2319 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2320 int ret;
2321
2322 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2323 return -EOPNOTSUPP;
2324
2325 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2326 if (ret < 0)
2327 return ret;
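 /* Convert degrees C back to the 5-bit, 5 degree step representation. */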
2328 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
2329 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
2330 (ret & 0xe0ff) | (temp << 8));
2331}
2332
2333int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
2334{
2335 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2336 int ret;
2337
2338 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2339 return -EOPNOTSUPP;
2340
2341 *alarm = false;
2342
2343 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2344 if (ret < 0)
2345 return ret;
2346
2347 *alarm = !!(ret & 0x40);
2348
2349 return 0;
2350}
2351#endif /* CONFIG_NET_DSA_HWMON */
2352
Ben Hutchings98e67302011-11-25 14:36:19 +00002353static int __init mv88e6xxx_init(void)
2354{
2355#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2356 register_switch_driver(&mv88e6131_switch_driver);
2357#endif
2358#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2359 register_switch_driver(&mv88e6123_61_65_switch_driver);
2360#endif
Guenter Roeck3ad50cc2014-10-29 10:44:56 -07002361#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2362 register_switch_driver(&mv88e6352_switch_driver);
2363#endif
Andrew Lunn42f27252014-09-12 23:58:44 +02002364#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2365 register_switch_driver(&mv88e6171_switch_driver);
2366#endif
Ben Hutchings98e67302011-11-25 14:36:19 +00002367 return 0;
2368}
2369module_init(mv88e6xxx_init);
2370
2371static void __exit mv88e6xxx_cleanup(void)
2372{
Andrew Lunn42f27252014-09-12 23:58:44 +02002373#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2374 unregister_switch_driver(&mv88e6171_switch_driver);
2375#endif
Vivien Didelot4212b542015-05-01 10:43:52 -04002376#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2377 unregister_switch_driver(&mv88e6352_switch_driver);
2378#endif
Ben Hutchings98e67302011-11-25 14:36:19 +00002379#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2380 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
2381#endif
2382#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2383 unregister_switch_driver(&mv88e6131_switch_driver);
2384#endif
2385}
2386module_exit(mv88e6xxx_cleanup);
Ben Hutchings3d825ed2011-11-25 14:37:16 +00002387
2388MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
2389MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
2390MODULE_LICENSE("GPL");