blob: 6f13f72067621411383bc381e20b314427d6c8e6 [file] [log] [blame]
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00001/*
2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
3 * Copyright (c) 2008 Marvell Semiconductor
4 *
Vivien Didelotb8fee952015-08-13 12:52:19 -04005 * Copyright (c) 2015 CMC Electronics, Inc.
6 * Added support for VLAN Table Unit operations
7 *
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00008 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
Andrew Lunn87c8cef2015-06-20 18:42:28 +020014#include <linux/debugfs.h>
Barry Grussling19b2f972013-01-08 16:05:54 +000015#include <linux/delay.h>
Guenter Roeckdefb05b2015-03-26 18:36:38 -070016#include <linux/etherdevice.h>
Andrew Lunndea87022015-08-31 15:56:47 +020017#include <linux/ethtool.h>
Guenter Roeckfacd95b2015-03-26 18:36:35 -070018#include <linux/if_bridge.h>
Barry Grussling19b2f972013-01-08 16:05:54 +000019#include <linux/jiffies.h>
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000020#include <linux/list.h>
Paul Gortmaker2bbba272012-01-24 10:41:40 +000021#include <linux/module.h>
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000022#include <linux/netdevice.h>
23#include <linux/phy.h>
Andrew Lunn87c8cef2015-06-20 18:42:28 +020024#include <linux/seq_file.h>
Ben Hutchingsc8f0b862011-11-27 17:06:08 +000025#include <net/dsa.h>
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000026#include "mv88e6xxx.h"
27
Andrew Lunn16fe24f2015-05-06 01:09:55 +020028/* MDIO bus access can be nested in the case of PHYs connected to the
29 * internal MDIO bus of the switch, which is accessed via MDIO bus of
30 * the Ethernet interface. Avoid lockdep false positives by using
31 * mutex_lock_nested().
32 */
33static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
34{
35 int ret;
36
37 mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
38 ret = bus->read(bus, addr, regnum);
39 mutex_unlock(&bus->mdio_lock);
40
41 return ret;
42}
43
44static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
45 u16 val)
46{
47 int ret;
48
49 mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
50 ret = bus->write(bus, addr, regnum, val);
51 mutex_unlock(&bus->mdio_lock);
52
53 return ret;
54}
55
Barry Grussling3675c8d2013-01-08 16:05:53 +000056/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000057 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
58 * will be directly accessible on some {device address,register address}
59 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
60 * will only respond to SMI transactions to that specific address, and
61 * an indirect addressing mechanism needs to be used to access its
62 * registers.
63 */
64static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
65{
66 int ret;
67 int i;
68
69 for (i = 0; i < 16; i++) {
Andrew Lunn16fe24f2015-05-06 01:09:55 +020070 ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000071 if (ret < 0)
72 return ret;
73
Andrew Lunncca8b132015-04-02 04:06:39 +020074 if ((ret & SMI_CMD_BUSY) == 0)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000075 return 0;
76 }
77
78 return -ETIMEDOUT;
79}
80
81int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
82{
83 int ret;
84
85 if (sw_addr == 0)
Andrew Lunn16fe24f2015-05-06 01:09:55 +020086 return mv88e6xxx_mdiobus_read(bus, addr, reg);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000087
Barry Grussling3675c8d2013-01-08 16:05:53 +000088 /* Wait for the bus to become free. */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000089 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
90 if (ret < 0)
91 return ret;
92
Barry Grussling3675c8d2013-01-08 16:05:53 +000093 /* Transmit the read command. */
Andrew Lunn16fe24f2015-05-06 01:09:55 +020094 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
95 SMI_CMD_OP_22_READ | (addr << 5) | reg);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000096 if (ret < 0)
97 return ret;
98
Barry Grussling3675c8d2013-01-08 16:05:53 +000099 /* Wait for the read command to complete. */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000100 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
101 if (ret < 0)
102 return ret;
103
Barry Grussling3675c8d2013-01-08 16:05:53 +0000104 /* Read the data. */
Andrew Lunn16fe24f2015-05-06 01:09:55 +0200105 ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000106 if (ret < 0)
107 return ret;
108
109 return ret & 0xffff;
110}
111
Guenter Roeck8d6d09e2015-03-26 18:36:31 -0700112/* Must be called with SMI mutex held */
113static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000114{
Guenter Roeckb184e492014-10-17 12:30:58 -0700115 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000116 int ret;
117
Guenter Roeckb184e492014-10-17 12:30:58 -0700118 if (bus == NULL)
119 return -EINVAL;
120
Guenter Roeckb184e492014-10-17 12:30:58 -0700121 ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
Vivien Didelotbb92ea52015-01-23 16:10:36 -0500122 if (ret < 0)
123 return ret;
124
125 dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
126 addr, reg, ret);
127
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000128 return ret;
129}
130
Guenter Roeck8d6d09e2015-03-26 18:36:31 -0700131int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
132{
133 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
134 int ret;
135
136 mutex_lock(&ps->smi_mutex);
137 ret = _mv88e6xxx_reg_read(ds, addr, reg);
138 mutex_unlock(&ps->smi_mutex);
139
140 return ret;
141}
142
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000143int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
144 int reg, u16 val)
145{
146 int ret;
147
148 if (sw_addr == 0)
Andrew Lunn16fe24f2015-05-06 01:09:55 +0200149 return mv88e6xxx_mdiobus_write(bus, addr, reg, val);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000150
Barry Grussling3675c8d2013-01-08 16:05:53 +0000151 /* Wait for the bus to become free. */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000152 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
153 if (ret < 0)
154 return ret;
155
Barry Grussling3675c8d2013-01-08 16:05:53 +0000156 /* Transmit the data to write. */
Andrew Lunn16fe24f2015-05-06 01:09:55 +0200157 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000158 if (ret < 0)
159 return ret;
160
Barry Grussling3675c8d2013-01-08 16:05:53 +0000161 /* Transmit the write command. */
Andrew Lunn16fe24f2015-05-06 01:09:55 +0200162 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
163 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000164 if (ret < 0)
165 return ret;
166
Barry Grussling3675c8d2013-01-08 16:05:53 +0000167 /* Wait for the write command to complete. */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000168 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
169 if (ret < 0)
170 return ret;
171
172 return 0;
173}
174
Guenter Roeck8d6d09e2015-03-26 18:36:31 -0700175/* Must be called with SMI mutex held */
176static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
177 u16 val)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000178{
Guenter Roeckb184e492014-10-17 12:30:58 -0700179 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000180
Guenter Roeckb184e492014-10-17 12:30:58 -0700181 if (bus == NULL)
182 return -EINVAL;
183
Vivien Didelotbb92ea52015-01-23 16:10:36 -0500184 dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
185 addr, reg, val);
186
Guenter Roeck8d6d09e2015-03-26 18:36:31 -0700187 return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
188}
189
190int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
191{
192 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
193 int ret;
194
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000195 mutex_lock(&ps->smi_mutex);
Guenter Roeck8d6d09e2015-03-26 18:36:31 -0700196 ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000197 mutex_unlock(&ps->smi_mutex);
198
199 return ret;
200}
201
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000202int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
203{
Andrew Lunncca8b132015-04-02 04:06:39 +0200204 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
205 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
206 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000207
208 return 0;
209}
210
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000211int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
212{
213 int i;
214 int ret;
215
216 for (i = 0; i < 6; i++) {
217 int j;
218
Barry Grussling3675c8d2013-01-08 16:05:53 +0000219 /* Write the MAC address byte. */
Andrew Lunncca8b132015-04-02 04:06:39 +0200220 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
221 GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000222
Barry Grussling3675c8d2013-01-08 16:05:53 +0000223 /* Wait for the write to complete. */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000224 for (j = 0; j < 16; j++) {
Andrew Lunncca8b132015-04-02 04:06:39 +0200225 ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
226 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000227 break;
228 }
229 if (j == 16)
230 return -ETIMEDOUT;
231 }
232
233 return 0;
234}
235
Andrew Lunn3898c142015-05-06 01:09:53 +0200236/* Must be called with SMI mutex held */
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +0200237static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000238{
239 if (addr >= 0)
Andrew Lunn3898c142015-05-06 01:09:53 +0200240 return _mv88e6xxx_reg_read(ds, addr, regnum);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000241 return 0xffff;
242}
243
Andrew Lunn3898c142015-05-06 01:09:53 +0200244/* Must be called with SMI mutex held */
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +0200245static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
246 u16 val)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000247{
248 if (addr >= 0)
Andrew Lunn3898c142015-05-06 01:09:53 +0200249 return _mv88e6xxx_reg_write(ds, addr, regnum, val);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000250 return 0;
251}
252
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000253#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
254static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
255{
256 int ret;
Barry Grussling19b2f972013-01-08 16:05:54 +0000257 unsigned long timeout;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000258
Andrew Lunncca8b132015-04-02 04:06:39 +0200259 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
260 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
261 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000262
Barry Grussling19b2f972013-01-08 16:05:54 +0000263 timeout = jiffies + 1 * HZ;
264 while (time_before(jiffies, timeout)) {
Andrew Lunncca8b132015-04-02 04:06:39 +0200265 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
Barry Grussling19b2f972013-01-08 16:05:54 +0000266 usleep_range(1000, 2000);
Andrew Lunncca8b132015-04-02 04:06:39 +0200267 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
268 GLOBAL_STATUS_PPU_POLLING)
Barry Grussling85686582013-01-08 16:05:56 +0000269 return 0;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000270 }
271
272 return -ETIMEDOUT;
273}
274
275static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
276{
277 int ret;
Barry Grussling19b2f972013-01-08 16:05:54 +0000278 unsigned long timeout;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000279
Andrew Lunncca8b132015-04-02 04:06:39 +0200280 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
281 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000282
Barry Grussling19b2f972013-01-08 16:05:54 +0000283 timeout = jiffies + 1 * HZ;
284 while (time_before(jiffies, timeout)) {
Andrew Lunncca8b132015-04-02 04:06:39 +0200285 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
Barry Grussling19b2f972013-01-08 16:05:54 +0000286 usleep_range(1000, 2000);
Andrew Lunncca8b132015-04-02 04:06:39 +0200287 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
288 GLOBAL_STATUS_PPU_POLLING)
Barry Grussling85686582013-01-08 16:05:56 +0000289 return 0;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000290 }
291
292 return -ETIMEDOUT;
293}
294
295static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
296{
297 struct mv88e6xxx_priv_state *ps;
298
299 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
300 if (mutex_trylock(&ps->ppu_mutex)) {
Barry Grussling85686582013-01-08 16:05:56 +0000301 struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000302
Barry Grussling85686582013-01-08 16:05:56 +0000303 if (mv88e6xxx_ppu_enable(ds) == 0)
304 ps->ppu_disabled = 0;
305 mutex_unlock(&ps->ppu_mutex);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000306 }
307}
308
309static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
310{
311 struct mv88e6xxx_priv_state *ps = (void *)_ps;
312
313 schedule_work(&ps->ppu_work);
314}
315
316static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
317{
Florian Fainellia22adce2014-04-28 11:14:28 -0700318 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000319 int ret;
320
321 mutex_lock(&ps->ppu_mutex);
322
Barry Grussling3675c8d2013-01-08 16:05:53 +0000323 /* If the PHY polling unit is enabled, disable it so that
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000324 * we can access the PHY registers. If it was already
325 * disabled, cancel the timer that is going to re-enable
326 * it.
327 */
328 if (!ps->ppu_disabled) {
Barry Grussling85686582013-01-08 16:05:56 +0000329 ret = mv88e6xxx_ppu_disable(ds);
330 if (ret < 0) {
331 mutex_unlock(&ps->ppu_mutex);
332 return ret;
333 }
334 ps->ppu_disabled = 1;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000335 } else {
Barry Grussling85686582013-01-08 16:05:56 +0000336 del_timer(&ps->ppu_timer);
337 ret = 0;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000338 }
339
340 return ret;
341}
342
343static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
344{
Florian Fainellia22adce2014-04-28 11:14:28 -0700345 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000346
Barry Grussling3675c8d2013-01-08 16:05:53 +0000347 /* Schedule a timer to re-enable the PHY polling unit. */
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000348 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
349 mutex_unlock(&ps->ppu_mutex);
350}
351
352void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
353{
Florian Fainellia22adce2014-04-28 11:14:28 -0700354 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000355
356 mutex_init(&ps->ppu_mutex);
357 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
358 init_timer(&ps->ppu_timer);
359 ps->ppu_timer.data = (unsigned long)ps;
360 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
361}
362
363int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
364{
365 int ret;
366
367 ret = mv88e6xxx_ppu_access_get(ds);
368 if (ret >= 0) {
Barry Grussling85686582013-01-08 16:05:56 +0000369 ret = mv88e6xxx_reg_read(ds, addr, regnum);
370 mv88e6xxx_ppu_access_put(ds);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000371 }
372
373 return ret;
374}
375
376int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
377 int regnum, u16 val)
378{
379 int ret;
380
381 ret = mv88e6xxx_ppu_access_get(ds);
382 if (ret >= 0) {
Barry Grussling85686582013-01-08 16:05:56 +0000383 ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
384 mv88e6xxx_ppu_access_put(ds);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000385 }
386
387 return ret;
388}
389#endif
390
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000391void mv88e6xxx_poll_link(struct dsa_switch *ds)
392{
393 int i;
394
395 for (i = 0; i < DSA_MAX_PORTS; i++) {
396 struct net_device *dev;
Ingo Molnar2a9e7972008-11-25 16:50:49 -0800397 int uninitialized_var(port_status);
Andrew Lunn8b59d192015-08-31 15:56:52 +0200398 int pcs_ctrl;
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000399 int link;
400 int speed;
401 int duplex;
402 int fc;
403
404 dev = ds->ports[i];
405 if (dev == NULL)
406 continue;
407
Andrew Lunn8b59d192015-08-31 15:56:52 +0200408 pcs_ctrl = mv88e6xxx_reg_read(ds, REG_PORT(i), PORT_PCS_CTRL);
409 if (pcs_ctrl < 0 || pcs_ctrl & PORT_PCS_CTRL_FORCE_LINK)
410 continue;
411
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000412 link = 0;
413 if (dev->flags & IFF_UP) {
Andrew Lunncca8b132015-04-02 04:06:39 +0200414 port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
415 PORT_STATUS);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000416 if (port_status < 0)
417 continue;
418
Andrew Lunncca8b132015-04-02 04:06:39 +0200419 link = !!(port_status & PORT_STATUS_LINK);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000420 }
421
422 if (!link) {
423 if (netif_carrier_ok(dev)) {
Barry Grusslingab381a92013-01-08 16:05:55 +0000424 netdev_info(dev, "link down\n");
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000425 netif_carrier_off(dev);
426 }
427 continue;
428 }
429
Andrew Lunncca8b132015-04-02 04:06:39 +0200430 switch (port_status & PORT_STATUS_SPEED_MASK) {
431 case PORT_STATUS_SPEED_10:
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000432 speed = 10;
433 break;
Andrew Lunncca8b132015-04-02 04:06:39 +0200434 case PORT_STATUS_SPEED_100:
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000435 speed = 100;
436 break;
Andrew Lunncca8b132015-04-02 04:06:39 +0200437 case PORT_STATUS_SPEED_1000:
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000438 speed = 1000;
439 break;
440 default:
441 speed = -1;
442 break;
443 }
Andrew Lunncca8b132015-04-02 04:06:39 +0200444 duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
445 fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000446
447 if (!netif_carrier_ok(dev)) {
Barry Grusslingab381a92013-01-08 16:05:55 +0000448 netdev_info(dev,
449 "link up, %d Mb/s, %s duplex, flow control %sabled\n",
450 speed,
451 duplex ? "full" : "half",
452 fc ? "en" : "dis");
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000453 netif_carrier_on(dev);
454 }
455 }
456}
457
Andrew Lunn54d792f2015-05-06 01:09:47 +0200458static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
459{
460 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
461
462 switch (ps->id) {
463 case PORT_SWITCH_ID_6031:
464 case PORT_SWITCH_ID_6061:
465 case PORT_SWITCH_ID_6035:
466 case PORT_SWITCH_ID_6065:
467 return true;
468 }
469 return false;
470}
471
472static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
473{
474 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
475
476 switch (ps->id) {
477 case PORT_SWITCH_ID_6092:
478 case PORT_SWITCH_ID_6095:
479 return true;
480 }
481 return false;
482}
483
484static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
485{
486 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
487
488 switch (ps->id) {
489 case PORT_SWITCH_ID_6046:
490 case PORT_SWITCH_ID_6085:
491 case PORT_SWITCH_ID_6096:
492 case PORT_SWITCH_ID_6097:
493 return true;
494 }
495 return false;
496}
497
498static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
499{
500 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
501
502 switch (ps->id) {
503 case PORT_SWITCH_ID_6123:
504 case PORT_SWITCH_ID_6161:
505 case PORT_SWITCH_ID_6165:
506 return true;
507 }
508 return false;
509}
510
511static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
512{
513 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
514
515 switch (ps->id) {
516 case PORT_SWITCH_ID_6121:
517 case PORT_SWITCH_ID_6122:
518 case PORT_SWITCH_ID_6152:
519 case PORT_SWITCH_ID_6155:
520 case PORT_SWITCH_ID_6182:
521 case PORT_SWITCH_ID_6185:
522 case PORT_SWITCH_ID_6108:
523 case PORT_SWITCH_ID_6131:
524 return true;
525 }
526 return false;
527}
528
Guenter Roeckc22995c2015-07-25 09:42:28 -0700529static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -0700530{
531 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
532
533 switch (ps->id) {
534 case PORT_SWITCH_ID_6320:
535 case PORT_SWITCH_ID_6321:
536 return true;
537 }
538 return false;
539}
540
Andrew Lunn54d792f2015-05-06 01:09:47 +0200541static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
542{
543 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
544
545 switch (ps->id) {
546 case PORT_SWITCH_ID_6171:
547 case PORT_SWITCH_ID_6175:
548 case PORT_SWITCH_ID_6350:
549 case PORT_SWITCH_ID_6351:
550 return true;
551 }
552 return false;
553}
554
Andrew Lunnf3a8b6b2015-04-02 04:06:40 +0200555static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
556{
557 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
558
559 switch (ps->id) {
Andrew Lunnf3a8b6b2015-04-02 04:06:40 +0200560 case PORT_SWITCH_ID_6172:
561 case PORT_SWITCH_ID_6176:
Andrew Lunn54d792f2015-05-06 01:09:47 +0200562 case PORT_SWITCH_ID_6240:
563 case PORT_SWITCH_ID_6352:
Andrew Lunnf3a8b6b2015-04-02 04:06:40 +0200564 return true;
565 }
566 return false;
567}
568
Andrew Lunndea87022015-08-31 15:56:47 +0200569/* We expect the switch to perform auto negotiation if there is a real
570 * phy. However, in the case of a fixed link phy, we force the port
571 * settings from the fixed link settings.
572 */
573void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
574 struct phy_device *phydev)
575{
576 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
577 u32 ret, reg;
578
579 if (!phy_is_pseudo_fixed_link(phydev))
580 return;
581
582 mutex_lock(&ps->smi_mutex);
583
584 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
585 if (ret < 0)
586 goto out;
587
588 reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
589 PORT_PCS_CTRL_FORCE_LINK |
590 PORT_PCS_CTRL_DUPLEX_FULL |
591 PORT_PCS_CTRL_FORCE_DUPLEX |
592 PORT_PCS_CTRL_UNFORCED);
593
594 reg |= PORT_PCS_CTRL_FORCE_LINK;
595 if (phydev->link)
596 reg |= PORT_PCS_CTRL_LINK_UP;
597
598 if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
599 goto out;
600
601 switch (phydev->speed) {
602 case SPEED_1000:
603 reg |= PORT_PCS_CTRL_1000;
604 break;
605 case SPEED_100:
606 reg |= PORT_PCS_CTRL_100;
607 break;
608 case SPEED_10:
609 reg |= PORT_PCS_CTRL_10;
610 break;
611 default:
612 pr_info("Unknown speed");
613 goto out;
614 }
615
616 reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
617 if (phydev->duplex == DUPLEX_FULL)
618 reg |= PORT_PCS_CTRL_DUPLEX_FULL;
619
Andrew Lunne7e72ac2015-08-31 15:56:51 +0200620 if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
621 (port >= ps->num_ports - 2)) {
622 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
623 reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
624 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
625 reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
626 if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
627 reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
628 PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
629 }
Andrew Lunndea87022015-08-31 15:56:47 +0200630 _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
631
632out:
633 mutex_unlock(&ps->smi_mutex);
634}
635
Andrew Lunn31888232015-05-06 01:09:54 +0200636/* Must be called with SMI mutex held */
637static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000638{
639 int ret;
640 int i;
641
642 for (i = 0; i < 10; i++) {
Andrew Lunn31888232015-05-06 01:09:54 +0200643 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
Andrew Lunncca8b132015-04-02 04:06:39 +0200644 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000645 return 0;
646 }
647
648 return -ETIMEDOUT;
649}
650
Andrew Lunn31888232015-05-06 01:09:54 +0200651/* Must be called with SMI mutex held */
652static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000653{
654 int ret;
655
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -0700656 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
Andrew Lunnf3a8b6b2015-04-02 04:06:40 +0200657 port = (port + 1) << 5;
658
Barry Grussling3675c8d2013-01-08 16:05:53 +0000659 /* Snapshot the hardware statistics counters for this port. */
Andrew Lunn31888232015-05-06 01:09:54 +0200660 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
661 GLOBAL_STATS_OP_CAPTURE_PORT |
662 GLOBAL_STATS_OP_HIST_RX_TX | port);
663 if (ret < 0)
664 return ret;
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000665
Barry Grussling3675c8d2013-01-08 16:05:53 +0000666 /* Wait for the snapshotting to complete. */
Andrew Lunn31888232015-05-06 01:09:54 +0200667 ret = _mv88e6xxx_stats_wait(ds);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000668 if (ret < 0)
669 return ret;
670
671 return 0;
672}
673
Andrew Lunn31888232015-05-06 01:09:54 +0200674/* Must be called with SMI mutex held */
675static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000676{
677 u32 _val;
678 int ret;
679
680 *val = 0;
681
Andrew Lunn31888232015-05-06 01:09:54 +0200682 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
683 GLOBAL_STATS_OP_READ_CAPTURED |
684 GLOBAL_STATS_OP_HIST_RX_TX | stat);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000685 if (ret < 0)
686 return;
687
Andrew Lunn31888232015-05-06 01:09:54 +0200688 ret = _mv88e6xxx_stats_wait(ds);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000689 if (ret < 0)
690 return;
691
Andrew Lunn31888232015-05-06 01:09:54 +0200692 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000693 if (ret < 0)
694 return;
695
696 _val = ret << 16;
697
Andrew Lunn31888232015-05-06 01:09:54 +0200698 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000699 if (ret < 0)
700 return;
701
702 *val = _val | ret;
703}
704
Andrew Lunne413e7e2015-04-02 04:06:38 +0200705static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
706 { "in_good_octets", 8, 0x00, },
707 { "in_bad_octets", 4, 0x02, },
708 { "in_unicast", 4, 0x04, },
709 { "in_broadcasts", 4, 0x06, },
710 { "in_multicasts", 4, 0x07, },
711 { "in_pause", 4, 0x16, },
712 { "in_undersize", 4, 0x18, },
713 { "in_fragments", 4, 0x19, },
714 { "in_oversize", 4, 0x1a, },
715 { "in_jabber", 4, 0x1b, },
716 { "in_rx_error", 4, 0x1c, },
717 { "in_fcs_error", 4, 0x1d, },
718 { "out_octets", 8, 0x0e, },
719 { "out_unicast", 4, 0x10, },
720 { "out_broadcasts", 4, 0x13, },
721 { "out_multicasts", 4, 0x12, },
722 { "out_pause", 4, 0x15, },
723 { "excessive", 4, 0x11, },
724 { "collisions", 4, 0x1e, },
725 { "deferred", 4, 0x05, },
726 { "single", 4, 0x14, },
727 { "multiple", 4, 0x17, },
728 { "out_fcs_error", 4, 0x03, },
729 { "late", 4, 0x1f, },
730 { "hist_64bytes", 4, 0x08, },
731 { "hist_65_127bytes", 4, 0x09, },
732 { "hist_128_255bytes", 4, 0x0a, },
733 { "hist_256_511bytes", 4, 0x0b, },
734 { "hist_512_1023bytes", 4, 0x0c, },
735 { "hist_1024_max_bytes", 4, 0x0d, },
736 /* Not all devices have the following counters */
737 { "sw_in_discards", 4, 0x110, },
738 { "sw_in_filtered", 2, 0x112, },
739 { "sw_out_filtered", 2, 0x113, },
740
741};
742
743static bool have_sw_in_discards(struct dsa_switch *ds)
744{
745 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
746
747 switch (ps->id) {
Andrew Lunncca8b132015-04-02 04:06:39 +0200748 case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
749 case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
750 case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
751 case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
752 case PORT_SWITCH_ID_6352:
Andrew Lunne413e7e2015-04-02 04:06:38 +0200753 return true;
754 default:
755 return false;
756 }
757}
758
759static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
760 int nr_stats,
761 struct mv88e6xxx_hw_stat *stats,
762 int port, uint8_t *data)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000763{
764 int i;
765
766 for (i = 0; i < nr_stats; i++) {
767 memcpy(data + i * ETH_GSTRING_LEN,
768 stats[i].string, ETH_GSTRING_LEN);
769 }
770}
771
Andrew Lunn80c46272015-06-20 18:42:30 +0200772static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
773 int stat,
774 struct mv88e6xxx_hw_stat *stats,
775 int port)
776{
777 struct mv88e6xxx_hw_stat *s = stats + stat;
778 u32 low;
779 u32 high = 0;
780 int ret;
781 u64 value;
782
783 if (s->reg >= 0x100) {
784 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
785 s->reg - 0x100);
786 if (ret < 0)
787 return UINT64_MAX;
788
789 low = ret;
790 if (s->sizeof_stat == 4) {
791 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
792 s->reg - 0x100 + 1);
793 if (ret < 0)
794 return UINT64_MAX;
795 high = ret;
796 }
797 } else {
798 _mv88e6xxx_stats_read(ds, s->reg, &low);
799 if (s->sizeof_stat == 8)
800 _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
801 }
802 value = (((u64)high) << 16) | low;
803 return value;
804}
805
Andrew Lunne413e7e2015-04-02 04:06:38 +0200806static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
807 int nr_stats,
808 struct mv88e6xxx_hw_stat *stats,
809 int port, uint64_t *data)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000810{
Florian Fainellia22adce2014-04-28 11:14:28 -0700811 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000812 int ret;
813 int i;
814
Andrew Lunn31888232015-05-06 01:09:54 +0200815 mutex_lock(&ps->smi_mutex);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000816
Andrew Lunn31888232015-05-06 01:09:54 +0200817 ret = _mv88e6xxx_stats_snapshot(ds, port);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000818 if (ret < 0) {
Andrew Lunn31888232015-05-06 01:09:54 +0200819 mutex_unlock(&ps->smi_mutex);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000820 return;
821 }
822
Barry Grussling3675c8d2013-01-08 16:05:53 +0000823 /* Read each of the counters. */
Andrew Lunn80c46272015-06-20 18:42:30 +0200824 for (i = 0; i < nr_stats; i++)
825 data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000826
Andrew Lunn31888232015-05-06 01:09:54 +0200827 mutex_unlock(&ps->smi_mutex);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000828}
Ben Hutchings98e67302011-11-25 14:36:19 +0000829
Andrew Lunne413e7e2015-04-02 04:06:38 +0200830/* All the statistics in the table */
831void
832mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
833{
834 if (have_sw_in_discards(ds))
835 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
836 mv88e6xxx_hw_stats, port, data);
837 else
838 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
839 mv88e6xxx_hw_stats, port, data);
840}
841
842int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
843{
844 if (have_sw_in_discards(ds))
845 return ARRAY_SIZE(mv88e6xxx_hw_stats);
846 return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
847}
848
849void
850mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
851 int port, uint64_t *data)
852{
853 if (have_sw_in_discards(ds))
854 _mv88e6xxx_get_ethtool_stats(
855 ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
856 mv88e6xxx_hw_stats, port, data);
857 else
858 _mv88e6xxx_get_ethtool_stats(
859 ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
860 mv88e6xxx_hw_stats, port, data);
861}
862
Guenter Roecka1ab91f2014-10-29 10:45:05 -0700863int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
864{
865 return 32 * sizeof(u16);
866}
867
868void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
869 struct ethtool_regs *regs, void *_p)
870{
871 u16 *p = _p;
872 int i;
873
874 regs->version = 0;
875
876 memset(p, 0xff, 32 * sizeof(u16));
877
878 for (i = 0; i < 32; i++) {
879 int ret;
880
881 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
882 if (ret >= 0)
883 p[i] = ret;
884 }
885}
886
Guenter Roeckfacd95b2015-03-26 18:36:35 -0700887/* Must be called with SMI lock held */
Andrew Lunn3898c142015-05-06 01:09:53 +0200888static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
889 u16 mask)
Guenter Roeckfacd95b2015-03-26 18:36:35 -0700890{
891 unsigned long timeout = jiffies + HZ / 10;
892
893 while (time_before(jiffies, timeout)) {
894 int ret;
895
896 ret = _mv88e6xxx_reg_read(ds, reg, offset);
897 if (ret < 0)
898 return ret;
899 if (!(ret & mask))
900 return 0;
901
902 usleep_range(1000, 2000);
903 }
904 return -ETIMEDOUT;
905}
906
Andrew Lunn3898c142015-05-06 01:09:53 +0200907static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
908{
909 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
910 int ret;
911
912 mutex_lock(&ps->smi_mutex);
913 ret = _mv88e6xxx_wait(ds, reg, offset, mask);
914 mutex_unlock(&ps->smi_mutex);
915
916 return ret;
917}
918
919static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
920{
921 return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
922 GLOBAL2_SMI_OP_BUSY);
923}
924
925int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
926{
927 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
928 GLOBAL2_EEPROM_OP_LOAD);
929}
930
931int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
932{
933 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
934 GLOBAL2_EEPROM_OP_BUSY);
935}
936
Guenter Roeckfacd95b2015-03-26 18:36:35 -0700937/* Must be called with SMI lock held */
938static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
939{
Andrew Lunncca8b132015-04-02 04:06:39 +0200940 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
941 GLOBAL_ATU_OP_BUSY);
Guenter Roeckfacd95b2015-03-26 18:36:35 -0700942}
943
Andrew Lunn56d95e22015-06-20 18:42:33 +0200944/* Must be called with SMI lock held */
945static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
946{
947 return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
948 GLOBAL2_SCRATCH_BUSY);
949}
950
Andrew Lunn3898c142015-05-06 01:09:53 +0200951/* Must be called with SMI mutex held */
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +0200952static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
953 int regnum)
Andrew Lunnf3044682015-02-14 19:17:50 +0100954{
955 int ret;
956
Andrew Lunn3898c142015-05-06 01:09:53 +0200957 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
958 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
959 regnum);
Andrew Lunnf3044682015-02-14 19:17:50 +0100960 if (ret < 0)
961 return ret;
962
Andrew Lunn3898c142015-05-06 01:09:53 +0200963 ret = _mv88e6xxx_phy_wait(ds);
964 if (ret < 0)
965 return ret;
966
967 return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
Andrew Lunnf3044682015-02-14 19:17:50 +0100968}
969
Andrew Lunn3898c142015-05-06 01:09:53 +0200970/* Must be called with SMI mutex held */
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +0200971static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
972 int regnum, u16 val)
Andrew Lunnf3044682015-02-14 19:17:50 +0100973{
Andrew Lunn3898c142015-05-06 01:09:53 +0200974 int ret;
Andrew Lunnf3044682015-02-14 19:17:50 +0100975
Andrew Lunn3898c142015-05-06 01:09:53 +0200976 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
977 if (ret < 0)
978 return ret;
979
980 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
981 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
982 regnum);
983
984 return _mv88e6xxx_phy_wait(ds);
Andrew Lunnf3044682015-02-14 19:17:50 +0100985}
986
Guenter Roeck11b3b452015-03-06 22:23:51 -0800987int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
988{
Andrew Lunn2f40c692015-04-02 04:06:37 +0200989 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Guenter Roeck11b3b452015-03-06 22:23:51 -0800990 int reg;
991
Andrew Lunn3898c142015-05-06 01:09:53 +0200992 mutex_lock(&ps->smi_mutex);
Andrew Lunn2f40c692015-04-02 04:06:37 +0200993
994 reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
Guenter Roeck11b3b452015-03-06 22:23:51 -0800995 if (reg < 0)
Andrew Lunn2f40c692015-04-02 04:06:37 +0200996 goto out;
Guenter Roeck11b3b452015-03-06 22:23:51 -0800997
998 e->eee_enabled = !!(reg & 0x0200);
999 e->tx_lpi_enabled = !!(reg & 0x0100);
1000
Andrew Lunn3898c142015-05-06 01:09:53 +02001001 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
Guenter Roeck11b3b452015-03-06 22:23:51 -08001002 if (reg < 0)
Andrew Lunn2f40c692015-04-02 04:06:37 +02001003 goto out;
Guenter Roeck11b3b452015-03-06 22:23:51 -08001004
Andrew Lunncca8b132015-04-02 04:06:39 +02001005 e->eee_active = !!(reg & PORT_STATUS_EEE);
Andrew Lunn2f40c692015-04-02 04:06:37 +02001006 reg = 0;
Guenter Roeck11b3b452015-03-06 22:23:51 -08001007
Andrew Lunn2f40c692015-04-02 04:06:37 +02001008out:
Andrew Lunn3898c142015-05-06 01:09:53 +02001009 mutex_unlock(&ps->smi_mutex);
Andrew Lunn2f40c692015-04-02 04:06:37 +02001010 return reg;
Guenter Roeck11b3b452015-03-06 22:23:51 -08001011}
1012
1013int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
1014 struct phy_device *phydev, struct ethtool_eee *e)
1015{
Andrew Lunn2f40c692015-04-02 04:06:37 +02001016 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1017 int reg;
Guenter Roeck11b3b452015-03-06 22:23:51 -08001018 int ret;
1019
Andrew Lunn3898c142015-05-06 01:09:53 +02001020 mutex_lock(&ps->smi_mutex);
Guenter Roeck11b3b452015-03-06 22:23:51 -08001021
Andrew Lunn2f40c692015-04-02 04:06:37 +02001022 ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
1023 if (ret < 0)
1024 goto out;
1025
1026 reg = ret & ~0x0300;
1027 if (e->eee_enabled)
1028 reg |= 0x0200;
1029 if (e->tx_lpi_enabled)
1030 reg |= 0x0100;
1031
1032 ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
1033out:
Andrew Lunn3898c142015-05-06 01:09:53 +02001034 mutex_unlock(&ps->smi_mutex);
Andrew Lunn2f40c692015-04-02 04:06:37 +02001035
1036 return ret;
Guenter Roeck11b3b452015-03-06 22:23:51 -08001037}
1038
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001039static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
1040{
1041 int ret;
1042
Vivien Didelota08df0f2015-08-10 09:09:46 -04001043 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001044 if (ret < 0)
1045 return ret;
1046
Andrew Lunncca8b132015-04-02 04:06:39 +02001047 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001048 if (ret < 0)
1049 return ret;
1050
1051 return _mv88e6xxx_atu_wait(ds);
1052}
1053
1054static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
1055{
1056 int ret;
1057
1058 ret = _mv88e6xxx_atu_wait(ds);
1059 if (ret < 0)
1060 return ret;
1061
Andrew Lunncca8b132015-04-02 04:06:39 +02001062 return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001063}
1064
1065static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
1066{
1067 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Geert Uytterhoevenc3ffe6d2015-04-16 20:49:14 +02001068 int reg, ret = 0;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001069 u8 oldstate;
1070
1071 mutex_lock(&ps->smi_mutex);
1072
Andrew Lunncca8b132015-04-02 04:06:39 +02001073 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
Guenter Roeck538cc282015-04-15 22:12:42 -07001074 if (reg < 0) {
1075 ret = reg;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001076 goto abort;
Guenter Roeck538cc282015-04-15 22:12:42 -07001077 }
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001078
Andrew Lunncca8b132015-04-02 04:06:39 +02001079 oldstate = reg & PORT_CONTROL_STATE_MASK;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001080 if (oldstate != state) {
1081 /* Flush forwarding database if we're moving a port
1082 * from Learning or Forwarding state to Disabled or
1083 * Blocking or Listening state.
1084 */
Andrew Lunncca8b132015-04-02 04:06:39 +02001085 if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
1086 state <= PORT_CONTROL_STATE_BLOCKING) {
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001087 ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
1088 if (ret)
1089 goto abort;
1090 }
Andrew Lunncca8b132015-04-02 04:06:39 +02001091 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1092 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
1093 reg);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001094 }
1095
1096abort:
1097 mutex_unlock(&ps->smi_mutex);
1098 return ret;
1099}
1100
1101/* Must be called with smi lock held */
1102static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
1103{
1104 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1105 u8 fid = ps->fid[port];
1106 u16 reg = fid << 12;
1107
1108 if (dsa_is_cpu_port(ds, port))
1109 reg |= ds->phys_port_mask;
1110 else
1111 reg |= (ps->bridge_mask[fid] |
1112 (1 << dsa_upstream_port(ds))) & ~(1 << port);
1113
Andrew Lunncca8b132015-04-02 04:06:39 +02001114 return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001115}
1116
1117/* Must be called with smi lock held */
1118static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
1119{
1120 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1121 int port;
1122 u32 mask;
1123 int ret;
1124
1125 mask = ds->phys_port_mask;
1126 while (mask) {
1127 port = __ffs(mask);
1128 mask &= ~(1 << port);
1129 if (ps->fid[port] != fid)
1130 continue;
1131
1132 ret = _mv88e6xxx_update_port_config(ds, port);
1133 if (ret)
1134 return ret;
1135 }
1136
1137 return _mv88e6xxx_flush_fid(ds, fid);
1138}
1139
1140/* Bridge handling functions */
1141
1142int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1143{
1144 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1145 int ret = 0;
1146 u32 nmask;
1147 int fid;
1148
1149 /* If the bridge group is not empty, join that group.
1150 * Otherwise create a new group.
1151 */
1152 fid = ps->fid[port];
1153 nmask = br_port_mask & ~(1 << port);
1154 if (nmask)
1155 fid = ps->fid[__ffs(nmask)];
1156
1157 nmask = ps->bridge_mask[fid] | (1 << port);
1158 if (nmask != br_port_mask) {
1159 netdev_err(ds->ports[port],
1160 "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1161 fid, br_port_mask, nmask);
1162 return -EINVAL;
1163 }
1164
1165 mutex_lock(&ps->smi_mutex);
1166
1167 ps->bridge_mask[fid] = br_port_mask;
1168
1169 if (fid != ps->fid[port]) {
Vivien Didelot194fea72015-08-10 09:09:47 -04001170 clear_bit(ps->fid[port], ps->fid_bitmap);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001171 ps->fid[port] = fid;
1172 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1173 }
1174
1175 mutex_unlock(&ps->smi_mutex);
1176
1177 return ret;
1178}
1179
1180int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1181{
1182 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1183 u8 fid, newfid;
1184 int ret;
1185
1186 fid = ps->fid[port];
1187
1188 if (ps->bridge_mask[fid] != br_port_mask) {
1189 netdev_err(ds->ports[port],
1190 "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1191 fid, br_port_mask, ps->bridge_mask[fid]);
1192 return -EINVAL;
1193 }
1194
1195 /* If the port was the last port of a bridge, we are done.
1196 * Otherwise assign a new fid to the port, and fix up
1197 * the bridge configuration.
1198 */
1199 if (br_port_mask == (1 << port))
1200 return 0;
1201
1202 mutex_lock(&ps->smi_mutex);
1203
Vivien Didelot194fea72015-08-10 09:09:47 -04001204 newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
1205 if (unlikely(newfid > ps->num_ports)) {
1206 netdev_err(ds->ports[port], "all first %d FIDs are used\n",
1207 ps->num_ports);
1208 ret = -ENOSPC;
1209 goto unlock;
1210 }
1211
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001212 ps->fid[port] = newfid;
Vivien Didelot194fea72015-08-10 09:09:47 -04001213 set_bit(newfid, ps->fid_bitmap);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001214 ps->bridge_mask[fid] &= ~(1 << port);
1215 ps->bridge_mask[newfid] = 1 << port;
1216
1217 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1218 if (!ret)
1219 ret = _mv88e6xxx_update_bridge_config(ds, newfid);
1220
Vivien Didelot194fea72015-08-10 09:09:47 -04001221unlock:
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001222 mutex_unlock(&ps->smi_mutex);
1223
1224 return ret;
1225}
1226
1227int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
1228{
1229 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1230 int stp_state;
1231
1232 switch (state) {
1233 case BR_STATE_DISABLED:
Andrew Lunncca8b132015-04-02 04:06:39 +02001234 stp_state = PORT_CONTROL_STATE_DISABLED;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001235 break;
1236 case BR_STATE_BLOCKING:
1237 case BR_STATE_LISTENING:
Andrew Lunncca8b132015-04-02 04:06:39 +02001238 stp_state = PORT_CONTROL_STATE_BLOCKING;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001239 break;
1240 case BR_STATE_LEARNING:
Andrew Lunncca8b132015-04-02 04:06:39 +02001241 stp_state = PORT_CONTROL_STATE_LEARNING;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001242 break;
1243 case BR_STATE_FORWARDING:
1244 default:
Andrew Lunncca8b132015-04-02 04:06:39 +02001245 stp_state = PORT_CONTROL_STATE_FORWARDING;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001246 break;
1247 }
1248
1249 netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);
1250
1251 /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
1252 * so we can not update the port state directly but need to schedule it.
1253 */
1254 ps->port_state[port] = stp_state;
1255 set_bit(port, &ps->port_state_update_mask);
1256 schedule_work(&ps->bridge_work);
1257
1258 return 0;
1259}
1260
Vivien Didelotb8fee952015-08-13 12:52:19 -04001261int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
1262{
1263 int ret;
1264
1265 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
1266 if (ret < 0)
1267 return ret;
1268
1269 *pvid = ret & PORT_DEFAULT_VLAN_MASK;
1270
1271 return 0;
1272}
1273
Vivien Didelot0d3b33e2015-08-13 12:52:22 -04001274int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
1275{
1276 return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
1277 pvid & PORT_DEFAULT_VLAN_MASK);
1278}
1279
Vivien Didelot6b17e862015-08-13 12:52:18 -04001280static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
1281{
1282 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
1283 GLOBAL_VTU_OP_BUSY);
1284}
1285
1286static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
1287{
1288 int ret;
1289
1290 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
1291 if (ret < 0)
1292 return ret;
1293
1294 return _mv88e6xxx_vtu_wait(ds);
1295}
1296
1297static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
1298{
1299 int ret;
1300
1301 ret = _mv88e6xxx_vtu_wait(ds);
1302 if (ret < 0)
1303 return ret;
1304
1305 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
1306}
1307
Vivien Didelotb8fee952015-08-13 12:52:19 -04001308static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
1309 struct mv88e6xxx_vtu_stu_entry *entry,
1310 unsigned int nibble_offset)
1311{
1312 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1313 u16 regs[3];
1314 int i;
1315 int ret;
1316
1317 for (i = 0; i < 3; ++i) {
1318 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1319 GLOBAL_VTU_DATA_0_3 + i);
1320 if (ret < 0)
1321 return ret;
1322
1323 regs[i] = ret;
1324 }
1325
1326 for (i = 0; i < ps->num_ports; ++i) {
1327 unsigned int shift = (i % 4) * 4 + nibble_offset;
1328 u16 reg = regs[i / 4];
1329
1330 entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
1331 }
1332
1333 return 0;
1334}
1335
Vivien Didelot7dad08d2015-08-13 12:52:21 -04001336static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
1337 struct mv88e6xxx_vtu_stu_entry *entry,
1338 unsigned int nibble_offset)
1339{
1340 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1341 u16 regs[3] = { 0 };
1342 int i;
1343 int ret;
1344
1345 for (i = 0; i < ps->num_ports; ++i) {
1346 unsigned int shift = (i % 4) * 4 + nibble_offset;
1347 u8 data = entry->data[i];
1348
1349 regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
1350 }
1351
1352 for (i = 0; i < 3; ++i) {
1353 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
1354 GLOBAL_VTU_DATA_0_3 + i, regs[i]);
1355 if (ret < 0)
1356 return ret;
1357 }
1358
1359 return 0;
1360}
1361
Vivien Didelotb8fee952015-08-13 12:52:19 -04001362static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
1363 struct mv88e6xxx_vtu_stu_entry *entry)
1364{
1365 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1366 int ret;
1367
1368 ret = _mv88e6xxx_vtu_wait(ds);
1369 if (ret < 0)
1370 return ret;
1371
1372 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
1373 vid & GLOBAL_VTU_VID_MASK);
1374 if (ret < 0)
1375 return ret;
1376
1377 ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
1378 if (ret < 0)
1379 return ret;
1380
1381 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1382 if (ret < 0)
1383 return ret;
1384
1385 next.vid = ret & GLOBAL_VTU_VID_MASK;
1386 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1387
1388 if (next.valid) {
1389 ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
1390 if (ret < 0)
1391 return ret;
1392
1393 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1394 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1395 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1396 GLOBAL_VTU_FID);
1397 if (ret < 0)
1398 return ret;
1399
1400 next.fid = ret & GLOBAL_VTU_FID_MASK;
1401
1402 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1403 GLOBAL_VTU_SID);
1404 if (ret < 0)
1405 return ret;
1406
1407 next.sid = ret & GLOBAL_VTU_SID_MASK;
1408 }
1409 }
1410
1411 *entry = next;
1412 return 0;
1413}
1414
Vivien Didelot7dad08d2015-08-13 12:52:21 -04001415static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
1416 struct mv88e6xxx_vtu_stu_entry *entry)
1417{
1418 u16 reg = 0;
1419 int ret;
1420
1421 ret = _mv88e6xxx_vtu_wait(ds);
1422 if (ret < 0)
1423 return ret;
1424
1425 if (!entry->valid)
1426 goto loadpurge;
1427
1428 /* Write port member tags */
1429 ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
1430 if (ret < 0)
1431 return ret;
1432
1433 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1434 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1435 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1436 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1437 if (ret < 0)
1438 return ret;
1439
1440 reg = entry->fid & GLOBAL_VTU_FID_MASK;
1441 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
1442 if (ret < 0)
1443 return ret;
1444 }
1445
1446 reg = GLOBAL_VTU_VID_VALID;
1447loadpurge:
1448 reg |= entry->vid & GLOBAL_VTU_VID_MASK;
1449 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1450 if (ret < 0)
1451 return ret;
1452
1453 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
1454}
1455
Vivien Didelot0d3b33e2015-08-13 12:52:22 -04001456static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
1457 struct mv88e6xxx_vtu_stu_entry *entry)
1458{
1459 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1460 int ret;
1461
1462 ret = _mv88e6xxx_vtu_wait(ds);
1463 if (ret < 0)
1464 return ret;
1465
1466 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
1467 sid & GLOBAL_VTU_SID_MASK);
1468 if (ret < 0)
1469 return ret;
1470
1471 ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
1472 if (ret < 0)
1473 return ret;
1474
1475 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
1476 if (ret < 0)
1477 return ret;
1478
1479 next.sid = ret & GLOBAL_VTU_SID_MASK;
1480
1481 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1482 if (ret < 0)
1483 return ret;
1484
1485 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1486
1487 if (next.valid) {
1488 ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
1489 if (ret < 0)
1490 return ret;
1491 }
1492
1493 *entry = next;
1494 return 0;
1495}
1496
1497static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
1498 struct mv88e6xxx_vtu_stu_entry *entry)
1499{
1500 u16 reg = 0;
1501 int ret;
1502
1503 ret = _mv88e6xxx_vtu_wait(ds);
1504 if (ret < 0)
1505 return ret;
1506
1507 if (!entry->valid)
1508 goto loadpurge;
1509
1510 /* Write port states */
1511 ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
1512 if (ret < 0)
1513 return ret;
1514
1515 reg = GLOBAL_VTU_VID_VALID;
1516loadpurge:
1517 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1518 if (ret < 0)
1519 return ret;
1520
1521 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1522 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1523 if (ret < 0)
1524 return ret;
1525
1526 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
1527}
1528
1529static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
1530 struct mv88e6xxx_vtu_stu_entry *entry)
1531{
1532 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1533 struct mv88e6xxx_vtu_stu_entry vlan = {
1534 .valid = true,
1535 .vid = vid,
1536 };
1537 int i;
1538
1539 /* exclude all ports except the CPU */
1540 for (i = 0; i < ps->num_ports; ++i)
1541 vlan.data[i] = dsa_is_cpu_port(ds, i) ?
1542 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
1543 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1544
1545 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1546 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1547 struct mv88e6xxx_vtu_stu_entry vstp;
1548 int err;
1549
1550 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1551 * implemented, only one STU entry is needed to cover all VTU
1552 * entries. Thus, validate the SID 0.
1553 */
1554 vlan.sid = 0;
1555 err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
1556 if (err)
1557 return err;
1558
1559 if (vstp.sid != vlan.sid || !vstp.valid) {
1560 memset(&vstp, 0, sizeof(vstp));
1561 vstp.valid = true;
1562 vstp.sid = vlan.sid;
1563
1564 err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
1565 if (err)
1566 return err;
1567 }
1568
1569 /* Non-bridged ports and bridge groups use FIDs from 1 to
1570 * num_ports; VLANs use FIDs from num_ports+1 to 4095.
1571 */
1572 vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
1573 ps->num_ports + 1);
1574 if (unlikely(vlan.fid == VLAN_N_VID)) {
1575 pr_err("no more FID available for VLAN %d\n", vid);
1576 return -ENOSPC;
1577 }
1578
1579 err = _mv88e6xxx_flush_fid(ds, vlan.fid);
1580 if (err)
1581 return err;
1582
1583 set_bit(vlan.fid, ps->fid_bitmap);
1584 }
1585
1586 *entry = vlan;
1587 return 0;
1588}
1589
1590int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
1591 bool untagged)
1592{
1593 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1594 struct mv88e6xxx_vtu_stu_entry vlan;
1595 int err;
1596
1597 mutex_lock(&ps->smi_mutex);
1598 err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
1599 if (err)
1600 goto unlock;
1601
1602 if (vlan.vid != vid || !vlan.valid) {
1603 err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
1604 if (err)
1605 goto unlock;
1606 }
1607
1608 vlan.data[port] = untagged ?
1609 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
1610 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
1611
1612 err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
1613unlock:
1614 mutex_unlock(&ps->smi_mutex);
1615
1616 return err;
1617}
1618
Vivien Didelot7dad08d2015-08-13 12:52:21 -04001619int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
1620{
1621 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1622 struct mv88e6xxx_vtu_stu_entry vlan;
1623 bool keep = false;
1624 int i, err;
1625
1626 mutex_lock(&ps->smi_mutex);
1627
1628 err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
1629 if (err)
1630 goto unlock;
1631
1632 if (vlan.vid != vid || !vlan.valid ||
1633 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
1634 err = -ENOENT;
1635 goto unlock;
1636 }
1637
1638 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1639
1640 /* keep the VLAN unless all ports are excluded */
1641 for (i = 0; i < ps->num_ports; ++i) {
1642 if (dsa_is_cpu_port(ds, i))
1643 continue;
1644
1645 if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
1646 keep = true;
1647 break;
1648 }
1649 }
1650
1651 vlan.valid = keep;
1652 err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
1653 if (err)
1654 goto unlock;
1655
1656 if (!keep)
1657 clear_bit(vlan.fid, ps->fid_bitmap);
1658
1659unlock:
1660 mutex_unlock(&ps->smi_mutex);
1661
1662 return err;
1663}
1664
Vivien Didelot02512b62015-08-13 12:52:20 -04001665static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
1666 struct mv88e6xxx_vtu_stu_entry *entry)
1667{
1668 int err;
1669
1670 do {
1671 if (vid == 4095)
1672 return -ENOENT;
1673
1674 err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
1675 if (err)
1676 return err;
1677
1678 if (!entry->valid)
1679 return -ENOENT;
1680
1681 vid = entry->vid;
1682 } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
1683 entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);
1684
1685 return 0;
1686}
1687
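/* Report the next valid VLAN after *vid, along with its member and untagged
 * port bitmaps.
 */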
Vivien Didelotb8fee952015-08-13 12:52:19 -04001688int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
1689 unsigned long *ports, unsigned long *untagged)
1690{
1691 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1692 struct mv88e6xxx_vtu_stu_entry next;
1693 int port;
1694 int err;
1695
1696 if (*vid == 4095)
1697 return -ENOENT;
1698
1699 mutex_lock(&ps->smi_mutex);
1700 err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
1701 mutex_unlock(&ps->smi_mutex);
1702
1703 if (err)
1704 return err;
1705
1706 if (!next.valid)
1707 return -ENOENT;
1708
1709 *vid = next.vid;
1710
1711 for (port = 0; port < ps->num_ports; ++port) {
1712 clear_bit(port, ports);
1713 clear_bit(port, untagged);
1714
1715 if (dsa_is_cpu_port(ds, port))
1716 continue;
1717
1718 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
1719 next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1720 set_bit(port, ports);
1721
1722 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1723 set_bit(port, untagged);
1724 }
1725
1726 return 0;
1727}
1728
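/* Write a 48-bit MAC address into the three 16-bit ATU MAC registers. */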
Vivien Didelotc5723ac2015-08-10 09:09:48 -04001729static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
1730 const unsigned char *addr)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001731{
1732 int i, ret;
1733
1734 for (i = 0; i < 3; i++) {
Andrew Lunncca8b132015-04-02 04:06:39 +02001735 ret = _mv88e6xxx_reg_write(
1736 ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
1737 (addr[i * 2] << 8) | addr[i * 2 + 1]);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001738 if (ret < 0)
1739 return ret;
1740 }
1741
1742 return 0;
1743}
1744
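/* Read a 48-bit MAC address back from the three 16-bit ATU MAC registers. */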
Vivien Didelotc5723ac2015-08-10 09:09:48 -04001745static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001746{
1747 int i, ret;
1748
1749 for (i = 0; i < 3; i++) {
Andrew Lunncca8b132015-04-02 04:06:39 +02001750 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1751 GLOBAL_ATU_MAC_01 + i);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001752 if (ret < 0)
1753 return ret;
1754 addr[i * 2] = ret >> 8;
1755 addr[i * 2 + 1] = ret & 0xff;
1756 }
1757
1758 return 0;
1759}
1760
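/* Program the given entry into the ATU with a Load/Purge operation in its
 * FID; an entry in the UNUSED state purges any existing entry for that MAC.
 */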
Vivien Didelotfd231c82015-08-10 09:09:50 -04001761static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
1762 struct mv88e6xxx_atu_entry *entry)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001763{
Vivien Didelotfd231c82015-08-10 09:09:50 -04001764 u16 reg = 0;
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001765 int ret;
1766
1767 ret = _mv88e6xxx_atu_wait(ds);
1768 if (ret < 0)
1769 return ret;
1770
Vivien Didelotfd231c82015-08-10 09:09:50 -04001771 ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001772 if (ret < 0)
1773 return ret;
1774
Vivien Didelotfd231c82015-08-10 09:09:50 -04001775 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1776 unsigned int mask, shift;
1777
1778 if (entry->trunk) {
1779 reg |= GLOBAL_ATU_DATA_TRUNK;
1780 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1781 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1782 } else {
1783 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1784 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1785 }
1786
1787 reg |= (entry->portv_trunkid << shift) & mask;
1788 }
1789
1790 reg |= entry->state & GLOBAL_ATU_DATA_STATE_MASK;
1791
1792 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, reg);
1793 if (ret < 0)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001794 return ret;
1795
Vivien Didelotfd231c82015-08-10 09:09:50 -04001796 return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
1797}
David S. Millercdf09692015-08-11 12:00:37 -07001798
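/* Resolve the FID for a given port and VID: VID 0 maps to the port's private
 * FID, any other VID must match a VTU entry of which the port is a member.
 */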
Vivien Didelotfd231c82015-08-10 09:09:50 -04001799static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
1800{
1801 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot02512b62015-08-13 12:52:20 -04001802 struct mv88e6xxx_vtu_stu_entry vlan;
1803 int err;
Vivien Didelotfd231c82015-08-10 09:09:50 -04001804
1805 if (vid == 0)
1806 return ps->fid[port];
1807
Vivien Didelot02512b62015-08-13 12:52:20 -04001808 err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
1809 if (err)
1810 return err;
1811
1812 if (vlan.vid == vid)
1813 return vlan.fid;
1814
Vivien Didelotfd231c82015-08-10 09:09:50 -04001815 return -ENOENT;
1816}
1817
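/* Load or purge a single FDB entry for the given port, address and VID,
 * using the FID resolved from that VID.
 */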
1818static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
1819 const unsigned char *addr, u16 vid,
1820 u8 state)
1821{
1822 struct mv88e6xxx_atu_entry entry = { 0 };
1823 int ret;
1824
1825 ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
1826 if (ret < 0)
1827 return ret;
1828
1829 entry.fid = ret;
1830 entry.state = state;
1831 ether_addr_copy(entry.mac, addr);
1832 if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1833 entry.trunk = false;
1834 entry.portv_trunkid = BIT(port);
1835 }
1836
1837 return _mv88e6xxx_atu_load(ds, &entry);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001838}
1839
David S. Millercdf09692015-08-11 12:00:37 -07001840int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
1841 const unsigned char *addr, u16 vid)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001842{
David S. Millercdf09692015-08-11 12:00:37 -07001843 int state = is_multicast_ether_addr(addr) ?
1844 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1845 GLOBAL_ATU_DATA_STATE_UC_STATIC;
1846 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot6630e232015-08-06 01:44:07 -04001847 int ret;
1848
David S. Millercdf09692015-08-11 12:00:37 -07001849 mutex_lock(&ps->smi_mutex);
Vivien Didelotfd231c82015-08-10 09:09:50 -04001850 ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
David S. Millercdf09692015-08-11 12:00:37 -07001851 mutex_unlock(&ps->smi_mutex);
1852
1853 return ret;
1854}
1855
1856int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
1857 const unsigned char *addr, u16 vid)
1858{
1859 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1860 int ret;
1861
1862 mutex_lock(&ps->smi_mutex);
Vivien Didelotfd231c82015-08-10 09:09:50 -04001863 ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
David S. Millercdf09692015-08-11 12:00:37 -07001864 GLOBAL_ATU_DATA_STATE_UNUSED);
1865 mutex_unlock(&ps->smi_mutex);
1866
1867 return ret;
1868}
1869
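/* Issue an ATU GetNext operation in the given FID, starting from the given
 * MAC address, and decode the returned entry.
 */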
Vivien Didelot1d194042015-08-10 09:09:51 -04001870static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
1871 const unsigned char *addr,
1872 struct mv88e6xxx_atu_entry *entry)
David S. Millercdf09692015-08-11 12:00:37 -07001873{
Vivien Didelot1d194042015-08-10 09:09:51 -04001874 struct mv88e6xxx_atu_entry next = { 0 };
1875 int ret;
1876
1877 next.fid = fid;
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001878
1879 ret = _mv88e6xxx_atu_wait(ds);
1880 if (ret < 0)
1881 return ret;
1882
Vivien Didelotc5723ac2015-08-10 09:09:48 -04001883 ret = _mv88e6xxx_atu_mac_write(ds, addr);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001884 if (ret < 0)
1885 return ret;
1886
Vivien Didelot1d194042015-08-10 09:09:51 -04001887 ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001888 if (ret < 0)
1889 return ret;
1890
Vivien Didelot1d194042015-08-10 09:09:51 -04001891 ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
1892 if (ret < 0)
1893 return ret;
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001894
Vivien Didelot1d194042015-08-10 09:09:51 -04001895 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
1896 if (ret < 0)
1897 return ret;
1898
1899 next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
1900 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1901 unsigned int mask, shift;
1902
1903 if (ret & GLOBAL_ATU_DATA_TRUNK) {
1904 next.trunk = true;
1905 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1906 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1907 } else {
1908 next.trunk = false;
1909 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1910 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1911 }
1912
1913 next.portv_trunkid = (ret & mask) >> shift;
1914 }
1915
1916 *entry = next;
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001917 return 0;
1918}
1919
David S. Millercdf09692015-08-11 12:00:37 -07001920/* get next entry for port */
1921int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
Vivien Didelot2a778e12015-08-10 09:09:49 -04001922 unsigned char *addr, u16 *vid, bool *is_static)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001923{
1924 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot1d194042015-08-10 09:09:51 -04001925 struct mv88e6xxx_atu_entry next;
1926 u16 fid;
Vivien Didelot87820512015-08-06 01:44:08 -04001927 int ret;
1928
1929 mutex_lock(&ps->smi_mutex);
Vivien Didelot1d194042015-08-10 09:09:51 -04001930
1931 ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
1932 if (ret < 0)
1933 goto unlock;
1934 fid = ret;
1935
1936 do {
1937 if (is_broadcast_ether_addr(addr)) {
Vivien Didelot02512b62015-08-13 12:52:20 -04001938 struct mv88e6xxx_vtu_stu_entry vtu;
1939
1940 ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
1941 if (ret < 0)
1942 goto unlock;
1943
1944 *vid = vtu.vid;
1945 fid = vtu.fid;
Vivien Didelot1d194042015-08-10 09:09:51 -04001946 }
1947
1948 ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
1949 if (ret < 0)
1950 goto unlock;
1951
1952 ether_addr_copy(addr, next.mac);
1953
1954 if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
1955 continue;
1956 } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
1957
1958 *is_static = next.state == (is_multicast_ether_addr(addr) ?
1959 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1960 GLOBAL_ATU_DATA_STATE_UC_STATIC);
1961unlock:
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001962 mutex_unlock(&ps->smi_mutex);
1963
1964 return ret;
1965}
1966
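/* Deferred worker that applies the port STP state changes queued in
 * port_state_update_mask.
 */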
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001967static void mv88e6xxx_bridge_work(struct work_struct *work)
1968{
1969 struct mv88e6xxx_priv_state *ps;
1970 struct dsa_switch *ds;
1971 int port;
1972
1973 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
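	/* The private state is allocated immediately after the dsa_switch
	 * structure, so stepping back one dsa_switch recovers ds from ps.
	 */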
1974 ds = ((struct dsa_switch *)ps) - 1;
1975
1976 while (ps->port_state_update_mask) {
1977 port = __ffs(ps->port_state_update_mask);
1978 clear_bit(port, &ps->port_state_update_mask);
1979 mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
1980 }
1981}
1982
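/* Apply the common per-port configuration: force the MAC settings on CPU and
 * DSA ports, program the Port Control, Port Control 2 and VLAN map registers,
 * assign the port its default FID, and disable the per-port rate limiters,
 * as applicable to the chip family.
 */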
Andrew Lunndbde9e62015-05-06 01:09:48 +02001983static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
Guenter Roeckd827e882015-03-26 18:36:29 -07001984{
1985 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001986 int ret, fid;
Andrew Lunn54d792f2015-05-06 01:09:47 +02001987 u16 reg;
Guenter Roeckd827e882015-03-26 18:36:29 -07001988
1989 mutex_lock(&ps->smi_mutex);
1990
Andrew Lunn54d792f2015-05-06 01:09:47 +02001991 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1992 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1993 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07001994 mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02001995 /* MAC Forcing register: don't force link, speed,
1996 * duplex or flow control state to any particular
1997 * values on physical ports, but force the CPU port
1998 * and all DSA ports to their maximum bandwidth and
1999 * full duplex.
2000 */
2001 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
Andrew Lunn60045cb2015-08-17 23:52:51 +02002002 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002003 reg |= PORT_PCS_CTRL_FORCE_LINK |
2004 PORT_PCS_CTRL_LINK_UP |
2005 PORT_PCS_CTRL_DUPLEX_FULL |
2006 PORT_PCS_CTRL_FORCE_DUPLEX;
2007 if (mv88e6xxx_6065_family(ds))
2008 reg |= PORT_PCS_CTRL_100;
2009 else
2010 reg |= PORT_PCS_CTRL_1000;
2011 } else {
2012 reg |= PORT_PCS_CTRL_UNFORCED;
2013 }
2014
2015 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2016 PORT_PCS_CTRL, reg);
2017 if (ret)
2018 goto abort;
2019 }
2020
2021 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2022 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2023 * tunneling, determine priority by looking at 802.1p and IP
2024 * priority fields (IP prio has precedence), and set STP state
2025 * to Forwarding.
2026 *
2027 * If this is the CPU link, use DSA or EDSA tagging depending
2028 * on which tagging mode was configured.
2029 *
2030 * If this is a link to another switch, use DSA tagging mode.
2031 *
2032 * If this is the upstream port for this switch, enable
2033 * forwarding of unknown unicasts and multicasts.
2034 */
2035 reg = 0;
2036 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2037 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2038 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002039 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
Andrew Lunn54d792f2015-05-06 01:09:47 +02002040 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
2041 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
2042 PORT_CONTROL_STATE_FORWARDING;
2043 if (dsa_is_cpu_port(ds, port)) {
2044 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2045 reg |= PORT_CONTROL_DSA_TAG;
2046 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002047 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2048 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002049 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2050 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2051 else
2052 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2053 }
2054
2055 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2056 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2057 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002058 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002059 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2060 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
2061 }
2062 }
Andrew Lunn6083ce72015-08-17 23:52:52 +02002063 if (dsa_is_dsa_port(ds, port)) {
2064 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2065 reg |= PORT_CONTROL_DSA_TAG;
2066 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2067 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2068 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002069 reg |= PORT_CONTROL_FRAME_MODE_DSA;
Andrew Lunn6083ce72015-08-17 23:52:52 +02002070 }
2071
Andrew Lunn54d792f2015-05-06 01:09:47 +02002072 if (port == dsa_upstream_port(ds))
2073 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2074 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2075 }
2076 if (reg) {
2077 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2078 PORT_CONTROL, reg);
2079 if (ret)
2080 goto abort;
2081 }
2082
Vivien Didelot8efdda42015-08-13 12:52:23 -04002083 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2084	 * 10240 bytes, set the 802.1Q mode to Fallback, don't discard tagged or
2085 * untagged frames on this port, do a destination address lookup on all
2086 * received packets as usual, disable ARP mirroring and don't send a
2087 * copy of all transmitted/received frames on this port to the CPU.
Andrew Lunn54d792f2015-05-06 01:09:47 +02002088 */
2089 reg = 0;
2090 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2091 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002092 mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
Andrew Lunn54d792f2015-05-06 01:09:47 +02002093 reg = PORT_CONTROL_2_MAP_DA;
2094
2095 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002096 mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
Andrew Lunn54d792f2015-05-06 01:09:47 +02002097 reg |= PORT_CONTROL_2_JUMBO_10240;
2098
2099 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
2100 /* Set the upstream port this port should use */
2101 reg |= dsa_upstream_port(ds);
2102 /* enable forwarding of unknown multicast addresses to
2103 * the upstream port
2104 */
2105 if (port == dsa_upstream_port(ds))
2106 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
2107 }
2108
Vivien Didelotf5117ce2015-08-19 18:54:55 -04002109 reg |= PORT_CONTROL_2_8021Q_FALLBACK;
Vivien Didelot8efdda42015-08-13 12:52:23 -04002110
Andrew Lunn54d792f2015-05-06 01:09:47 +02002111 if (reg) {
2112 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2113 PORT_CONTROL_2, reg);
2114 if (ret)
2115 goto abort;
2116 }
2117
2118 /* Port Association Vector: when learning source addresses
2119 * of packets, add the address to the address database using
2120 * a port bitmap that has only the bit for this port set and
2121 * the other bits clear.
2122 */
2123 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
2124 1 << port);
2125 if (ret)
2126 goto abort;
2127
2128 /* Egress rate control 2: disable egress rate control. */
2129 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
2130 0x0000);
2131 if (ret)
2132 goto abort;
2133
2134 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002135 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2136 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002137 /* Do not limit the period of time that this port can
2138 * be paused for by the remote end or the period of
2139 * time that this port can pause the remote end.
2140 */
2141 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2142 PORT_PAUSE_CTRL, 0x0000);
2143 if (ret)
2144 goto abort;
2145
2146 /* Port ATU control: disable limiting the number of
2147 * address database entries that this port is allowed
2148 * to use.
2149 */
2150 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2151					   PORT_ATU_CONTROL, 0x0000);
		if (ret)
			goto abort;

2152 /* Priority Override: disable DA, SA and VTU priority
2153 * override.
2154 */
2155 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2156 PORT_PRI_OVERRIDE, 0x0000);
2157 if (ret)
2158 goto abort;
2159
2160		/* Port Ethertype: set this port's Ethertype to ETH_P_EDSA, the
2161		 * Ethertype used for Ethertype DSA (EDSA) tagging.
2162		 */
2163 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2164 PORT_ETH_TYPE, ETH_P_EDSA);
2165 if (ret)
2166 goto abort;
2167 /* Tag Remap: use an identity 802.1p prio -> switch
2168 * prio mapping.
2169 */
2170 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2171 PORT_TAG_REGMAP_0123, 0x3210);
2172 if (ret)
2173 goto abort;
2174
2175 /* Tag Remap 2: use an identity 802.1p prio -> switch
2176 * prio mapping.
2177 */
2178 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2179 PORT_TAG_REGMAP_4567, 0x7654);
2180 if (ret)
2181 goto abort;
2182 }
2183
2184 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2185 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002186 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2187 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002188 /* Rate Control: disable ingress rate limiting. */
2189 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2190 PORT_RATE_CONTROL, 0x0001);
2191 if (ret)
2192 goto abort;
2193 }
2194
Guenter Roeck366f0a02015-03-26 18:36:30 -07002195 /* Port Control 1: disable trunking, disable sending
2196 * learning messages to this port.
Guenter Roeckd827e882015-03-26 18:36:29 -07002197 */
Vivien Didelot614f03f2015-04-20 17:19:23 -04002198 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
Guenter Roeckd827e882015-03-26 18:36:29 -07002199 if (ret)
2200 goto abort;
2201
2202 /* Port based VLAN map: give each port its own address
2203 * database, allow the CPU port to talk to each of the 'real'
2204 * ports, and allow each of the 'real' ports to only talk to
2205 * the upstream port.
2206 */
Vivien Didelot194fea72015-08-10 09:09:47 -04002207 fid = port + 1;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07002208 ps->fid[port] = fid;
Vivien Didelot194fea72015-08-10 09:09:47 -04002209 set_bit(fid, ps->fid_bitmap);
Guenter Roeckd827e882015-03-26 18:36:29 -07002210
Guenter Roeckfacd95b2015-03-26 18:36:35 -07002211 if (!dsa_is_cpu_port(ds, port))
2212 ps->bridge_mask[fid] = 1 << port;
2213
2214 ret = _mv88e6xxx_update_port_config(ds, port);
Guenter Roeckd827e882015-03-26 18:36:29 -07002215 if (ret)
2216 goto abort;
2217
2218 /* Default VLAN ID and priority: don't set a default VLAN
2219 * ID, and set the default packet priority to zero.
2220 */
Vivien Didelot47cf1e62015-04-20 17:43:26 -04002221 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
2222 0x0000);
Guenter Roeckd827e882015-03-26 18:36:29 -07002223abort:
2224 mutex_unlock(&ps->smi_mutex);
2225 return ret;
2226}
2227
Andrew Lunndbde9e62015-05-06 01:09:48 +02002228int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2229{
2230 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2231 int ret;
2232 int i;
2233
2234 for (i = 0; i < ps->num_ports; i++) {
2235 ret = mv88e6xxx_setup_port(ds, i);
2236 if (ret < 0)
2237 return ret;
2238 }
2239 return 0;
2240}
2241
Andrew Lunn87c8cef2015-06-20 18:42:28 +02002242static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
2243{
2244 struct dsa_switch *ds = s->private;
2245
2246 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2247 int reg, port;
2248
2249 seq_puts(s, " GLOBAL GLOBAL2 ");
2250 for (port = 0 ; port < ps->num_ports; port++)
2251 seq_printf(s, " %2d ", port);
2252 seq_puts(s, "\n");
2253
2254 for (reg = 0; reg < 32; reg++) {
2255 seq_printf(s, "%2x: ", reg);
2256 seq_printf(s, " %4x %4x ",
2257 mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
2258 mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));
2259
2260 for (port = 0 ; port < ps->num_ports; port++)
2261 seq_printf(s, "%4x ",
2262 mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
2263 seq_puts(s, "\n");
2264 }
2265
2266 return 0;
2267}
2268
2269static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
2270{
2271 return single_open(file, mv88e6xxx_regs_show, inode->i_private);
2272}
2273
2274static const struct file_operations mv88e6xxx_regs_fops = {
2275 .open = mv88e6xxx_regs_open,
2276 .read = seq_read,
2277 .llseek = no_llseek,
2278 .release = single_release,
2279 .owner = THIS_MODULE,
2280};
2281
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002282static void mv88e6xxx_atu_show_header(struct seq_file *s)
2283{
2284 seq_puts(s, "DB T/P Vec State Addr\n");
2285}
2286
2287static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
2288 unsigned char *addr, int data)
2289{
2290 bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
2291 int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
2292 GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
2293 int state = data & GLOBAL_ATU_DATA_STATE_MASK;
2294
2295 seq_printf(s, "%03x %5s %10pb %x %pM\n",
2296 dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
2297}
2298
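/* Dump one address database: walk it with GetNext starting from the broadcast
 * address until an unused entry is returned.
 */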
2299static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
2300 int dbnum)
2301{
2302 unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2303 unsigned char addr[6];
2304 int ret, data, state;
2305
Vivien Didelotc5723ac2015-08-10 09:09:48 -04002306 ret = _mv88e6xxx_atu_mac_write(ds, bcast);
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002307 if (ret < 0)
2308 return ret;
2309
2310 do {
2311 ret = _mv88e6xxx_atu_cmd(ds, dbnum, GLOBAL_ATU_OP_GET_NEXT_DB);
2312 if (ret < 0)
2313 return ret;
2314 data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
2315 if (data < 0)
2316 return data;
2317
2318 state = data & GLOBAL_ATU_DATA_STATE_MASK;
2319 if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
2320 break;
Vivien Didelotc5723ac2015-08-10 09:09:48 -04002321 ret = _mv88e6xxx_atu_mac_read(ds, addr);
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002322 if (ret < 0)
2323 return ret;
2324 mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
2325 } while (state != GLOBAL_ATU_DATA_STATE_UNUSED);
2326
2327 return 0;
2328}
2329
2330static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
2331{
2332 struct dsa_switch *ds = s->private;
2333 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2334 int dbnum;
2335
2336 mv88e6xxx_atu_show_header(s);
2337
2338 for (dbnum = 0; dbnum < 255; dbnum++) {
2339 mutex_lock(&ps->smi_mutex);
2340 mv88e6xxx_atu_show_db(s, ds, dbnum);
2341 mutex_unlock(&ps->smi_mutex);
2342 }
2343
2344 return 0;
2345}
2346
2347static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
2348{
2349 return single_open(file, mv88e6xxx_atu_show, inode->i_private);
2350}
2351
2352static const struct file_operations mv88e6xxx_atu_fops = {
2353 .open = mv88e6xxx_atu_open,
2354 .read = seq_read,
2355 .llseek = no_llseek,
2356 .release = single_release,
2357 .owner = THIS_MODULE,
2358};
2359
Andrew Lunn532c7a32015-06-20 18:42:31 +02002360static void mv88e6xxx_stats_show_header(struct seq_file *s,
2361 struct mv88e6xxx_priv_state *ps)
2362{
2363 int port;
2364
2365 seq_puts(s, " Statistic ");
2366 for (port = 0 ; port < ps->num_ports; port++)
2367 seq_printf(s, "Port %2d ", port);
2368 seq_puts(s, "\n");
2369}
2370
2371static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
2372{
2373 struct dsa_switch *ds = s->private;
2374 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2375 struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
2376 int port, stat, max_stats;
2377 uint64_t value;
2378
2379 if (have_sw_in_discards(ds))
2380 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
2381 else
2382 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
2383
2384 mv88e6xxx_stats_show_header(s, ps);
2385
2386 mutex_lock(&ps->smi_mutex);
2387
2388 for (stat = 0; stat < max_stats; stat++) {
2389 seq_printf(s, "%19s: ", stats[stat].string);
2390 for (port = 0 ; port < ps->num_ports; port++) {
2391 _mv88e6xxx_stats_snapshot(ds, port);
2392 value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
2393 port);
2394 seq_printf(s, "%8llu ", value);
2395 }
2396 seq_puts(s, "\n");
2397 }
2398 mutex_unlock(&ps->smi_mutex);
2399
2400 return 0;
2401}
2402
2403static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
2404{
2405 return single_open(file, mv88e6xxx_stats_show, inode->i_private);
2406}
2407
2408static const struct file_operations mv88e6xxx_stats_fops = {
2409 .open = mv88e6xxx_stats_open,
2410 .read = seq_read,
2411 .llseek = no_llseek,
2412 .release = single_release,
2413 .owner = THIS_MODULE,
2414};
2415
Andrew Lunnd35bd872015-06-20 18:42:32 +02002416static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
2417{
2418 struct dsa_switch *ds = s->private;
2419 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2420 int target, ret;
2421
2422 seq_puts(s, "Target Port\n");
2423
2424 mutex_lock(&ps->smi_mutex);
2425 for (target = 0; target < 32; target++) {
2426 ret = _mv88e6xxx_reg_write(
2427 ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2428 target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
2429 if (ret < 0)
2430 goto out;
2431 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
2432 GLOBAL2_DEVICE_MAPPING);
2433 seq_printf(s, " %2d %2d\n", target,
2434 ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
2435 }
2436out:
2437 mutex_unlock(&ps->smi_mutex);
2438
2439 return 0;
2440}
2441
2442static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
2443{
2444 return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
2445}
2446
2447static const struct file_operations mv88e6xxx_device_map_fops = {
2448 .open = mv88e6xxx_device_map_open,
2449 .read = seq_read,
2450 .llseek = no_llseek,
2451 .release = single_release,
2452 .owner = THIS_MODULE,
2453};
2454
Andrew Lunn56d95e22015-06-20 18:42:33 +02002455static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
2456{
2457 struct dsa_switch *ds = s->private;
2458 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2459 int reg, ret;
2460
2461 seq_puts(s, "Register Value\n");
2462
2463 mutex_lock(&ps->smi_mutex);
2464 for (reg = 0; reg < 0x80; reg++) {
2465 ret = _mv88e6xxx_reg_write(
2466 ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
2467 reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
2468 if (ret < 0)
2469 goto out;
2470
2471 ret = _mv88e6xxx_scratch_wait(ds);
2472 if (ret < 0)
2473 goto out;
2474
2475 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
2476 GLOBAL2_SCRATCH_MISC);
2477 seq_printf(s, " %2x %2x\n", reg,
2478 ret & GLOBAL2_SCRATCH_VALUE_MASK);
2479 }
2480out:
2481 mutex_unlock(&ps->smi_mutex);
2482
2483 return 0;
2484}
2485
2486static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
2487{
2488 return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
2489}
2490
2491static const struct file_operations mv88e6xxx_scratch_fops = {
2492 .open = mv88e6xxx_scratch_open,
2493 .read = seq_read,
2494 .llseek = no_llseek,
2495 .release = single_release,
2496 .owner = THIS_MODULE,
2497};
2498
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002499int mv88e6xxx_setup_common(struct dsa_switch *ds)
2500{
2501 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Andrew Lunn87c8cef2015-06-20 18:42:28 +02002502 char *name;
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002503
2504 mutex_init(&ps->smi_mutex);
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002505
Andrew Lunncca8b132015-04-02 04:06:39 +02002506 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
Andrew Lunna8f064c2015-03-26 18:36:40 -07002507
Guenter Roeckfacd95b2015-03-26 18:36:35 -07002508 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
2509
Andrew Lunn87c8cef2015-06-20 18:42:28 +02002510 name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
2511 ps->dbgfs = debugfs_create_dir(name, NULL);
2512 kfree(name);
2513
2514 debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
2515 &mv88e6xxx_regs_fops);
2516
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002517 debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
2518 &mv88e6xxx_atu_fops);
2519
Andrew Lunn532c7a32015-06-20 18:42:31 +02002520 debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
2521 &mv88e6xxx_stats_fops);
2522
Andrew Lunnd35bd872015-06-20 18:42:32 +02002523 debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
2524 &mv88e6xxx_device_map_fops);
Andrew Lunn56d95e22015-06-20 18:42:33 +02002525
2526 debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
2527 &mv88e6xxx_scratch_fops);
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002528 return 0;
2529}
2530
Andrew Lunn54d792f2015-05-06 01:09:47 +02002531int mv88e6xxx_setup_global(struct dsa_switch *ds)
2532{
2533 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot24751e22015-08-03 09:17:44 -04002534 int ret;
Andrew Lunn54d792f2015-05-06 01:09:47 +02002535 int i;
2536
2537 /* Set the default address aging time to 5 minutes, and
2538 * enable address learn messages to be sent to all message
2539 * ports.
2540 */
2541 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
2542 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2543
2544 /* Configure the IP ToS mapping registers. */
2545 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2546 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2547 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2548 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2549 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2550 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2551 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2552 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2553
2554 /* Configure the IEEE 802.1p priority mapping register. */
2555 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
2556
2557 /* Send all frames with destination addresses matching
2558 * 01:80:c2:00:00:0x to the CPU port.
2559 */
2560 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2561
2562 /* Ignore removed tag data on doubly tagged packets, disable
2563 * flow control messages, force flow control priority to the
2564 * highest, and send all special multicast frames to the CPU
2565 * port at the highest priority.
2566 */
2567 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
2568 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
2569 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
2570
2571 /* Program the DSA routing table. */
2572 for (i = 0; i < 32; i++) {
2573 int nexthop = 0x1f;
2574
2575 if (ds->pd->rtable &&
2576 i != ds->index && i < ds->dst->pd->nr_chips)
2577 nexthop = ds->pd->rtable[i] & 0x1f;
2578
2579 REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2580 GLOBAL2_DEVICE_MAPPING_UPDATE |
2581 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
2582 nexthop);
2583 }
2584
2585 /* Clear all trunk masks. */
2586 for (i = 0; i < 8; i++)
2587 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
2588 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
2589 ((1 << ps->num_ports) - 1));
2590
2591 /* Clear all trunk mappings. */
2592 for (i = 0; i < 16; i++)
2593 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
2594 GLOBAL2_TRUNK_MAPPING_UPDATE |
2595 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2596
2597 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002598 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2599 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002600 /* Send all frames with destination addresses matching
2601 * 01:80:c2:00:00:2x to the CPU port.
2602 */
2603 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
2604
2605 /* Initialise cross-chip port VLAN table to reset
2606 * defaults.
2607 */
2608 REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
2609
2610 /* Clear the priority override table. */
2611 for (i = 0; i < 16; i++)
2612 REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
2613 0x8000 | (i << 8));
2614 }
2615
2616 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2617 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002618 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2619 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002620 /* Disable ingress rate limiting by resetting all
2621 * ingress rate limit registers to their initial
2622 * state.
2623 */
2624 for (i = 0; i < ps->num_ports; i++)
2625 REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
2626 0x9000 | (i << 8));
2627 }
2628
Andrew Lunndb687a52015-06-20 21:31:29 +02002629 /* Clear the statistics counters for all ports */
2630 REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
2631
2632 /* Wait for the flush to complete. */
Vivien Didelot24751e22015-08-03 09:17:44 -04002633 mutex_lock(&ps->smi_mutex);
2634 ret = _mv88e6xxx_stats_wait(ds);
Vivien Didelot6b17e862015-08-13 12:52:18 -04002635 if (ret < 0)
2636 goto unlock;
2637
2638 /* Clear all the VTU and STU entries */
2639 ret = _mv88e6xxx_vtu_stu_flush(ds);
2640unlock:
Vivien Didelot24751e22015-08-03 09:17:44 -04002641 mutex_unlock(&ps->smi_mutex);
Andrew Lunndb687a52015-06-20 21:31:29 +02002642
Vivien Didelot24751e22015-08-03 09:17:44 -04002643 return ret;
Andrew Lunn54d792f2015-05-06 01:09:47 +02002644}
2645
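/* Disable all ports, let the transmit queues drain, then issue a switch
 * software reset (optionally keeping the PPU active) and wait up to one
 * second for it to complete.
 */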
Andrew Lunn143a8302015-04-02 04:06:34 +02002646int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
2647{
2648 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2649 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2650 unsigned long timeout;
2651 int ret;
2652 int i;
2653
2654 /* Set all ports to the disabled state. */
2655 for (i = 0; i < ps->num_ports; i++) {
Andrew Lunncca8b132015-04-02 04:06:39 +02002656 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
2657 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
Andrew Lunn143a8302015-04-02 04:06:34 +02002658 }
2659
2660 /* Wait for transmit queues to drain. */
2661 usleep_range(2000, 4000);
2662
2663 /* Reset the switch. Keep the PPU active if requested. The PPU
2664 * needs to be active to support indirect phy register access
2665 * through global registers 0x18 and 0x19.
2666 */
2667 if (ppu_active)
2668 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
2669 else
2670 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
2671
2672 /* Wait up to one second for reset to complete. */
2673 timeout = jiffies + 1 * HZ;
2674 while (time_before(jiffies, timeout)) {
2675 ret = REG_READ(REG_GLOBAL, 0x00);
2676 if ((ret & is_reset) == is_reset)
2677 break;
2678 usleep_range(1000, 2000);
2679 }
2680 if (time_after(jiffies, timeout))
2681 return -ETIMEDOUT;
2682
2683 return 0;
2684}
2685
Andrew Lunn491435852015-04-02 04:06:35 +02002686int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2687{
2688 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2689 int ret;
2690
Andrew Lunn3898c142015-05-06 01:09:53 +02002691 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002692 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
Andrew Lunn491435852015-04-02 04:06:35 +02002693 if (ret < 0)
2694 goto error;
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002695 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
Andrew Lunn491435852015-04-02 04:06:35 +02002696error:
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002697 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
Andrew Lunn3898c142015-05-06 01:09:53 +02002698 mutex_unlock(&ps->smi_mutex);
Andrew Lunn491435852015-04-02 04:06:35 +02002699 return ret;
2700}
2701
2702int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2703 int reg, int val)
2704{
2705 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2706 int ret;
2707
Andrew Lunn3898c142015-05-06 01:09:53 +02002708 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002709 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
Andrew Lunn491435852015-04-02 04:06:35 +02002710 if (ret < 0)
2711 goto error;
2712
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002713 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
Andrew Lunn491435852015-04-02 04:06:35 +02002714error:
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002715 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
Andrew Lunn3898c142015-05-06 01:09:53 +02002716 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002717 return ret;
2718}
2719
2720static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
2721{
2722 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2723
2724 if (port >= 0 && port < ps->num_ports)
2725 return port;
2726 return -EINVAL;
2727}
2728
2729int
2730mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
2731{
2732 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2733 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2734 int ret;
2735
2736 if (addr < 0)
2737 return addr;
2738
Andrew Lunn3898c142015-05-06 01:09:53 +02002739 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002740 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
Andrew Lunn3898c142015-05-06 01:09:53 +02002741 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002742 return ret;
2743}
2744
2745int
2746mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
2747{
2748 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2749 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2750 int ret;
2751
2752 if (addr < 0)
2753 return addr;
2754
Andrew Lunn3898c142015-05-06 01:09:53 +02002755 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002756 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
Andrew Lunn3898c142015-05-06 01:09:53 +02002757 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002758 return ret;
2759}
2760
2761int
2762mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
2763{
2764 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2765 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2766 int ret;
2767
2768 if (addr < 0)
2769 return addr;
2770
Andrew Lunn3898c142015-05-06 01:09:53 +02002771 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002772 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
Andrew Lunn3898c142015-05-06 01:09:53 +02002773 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002774 return ret;
2775}
2776
2777int
2778mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
2779 u16 val)
2780{
2781 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2782 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2783 int ret;
2784
2785 if (addr < 0)
2786 return addr;
2787
Andrew Lunn3898c142015-05-06 01:09:53 +02002788 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002789 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
Andrew Lunn3898c142015-05-06 01:09:53 +02002790 mutex_unlock(&ps->smi_mutex);
Andrew Lunn491435852015-04-02 04:06:35 +02002791 return ret;
2792}
2793
Guenter Roeckc22995c2015-07-25 09:42:28 -07002794#ifdef CONFIG_NET_DSA_HWMON
2795
2796static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
2797{
2798 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2799 int ret;
2800 int val;
2801
2802 *temp = 0;
2803
2804 mutex_lock(&ps->smi_mutex);
2805
2806 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
2807 if (ret < 0)
2808 goto error;
2809
2810 /* Enable temperature sensor */
2811 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2812 if (ret < 0)
2813 goto error;
2814
2815 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
2816 if (ret < 0)
2817 goto error;
2818
2819 /* Wait for temperature to stabilize */
2820 usleep_range(10000, 12000);
2821
2822 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2823 if (val < 0) {
2824 ret = val;
2825 goto error;
2826 }
2827
2828 /* Disable temperature sensor */
2829	ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, val & ~(1 << 5));
2830 if (ret < 0)
2831 goto error;
2832
2833 *temp = ((val & 0x1f) - 5) * 5;
2834
2835error:
2836 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
2837 mutex_unlock(&ps->smi_mutex);
2838 return ret;
2839}
2840
2841static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
2842{
2843 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2844 int ret;
2845
2846 *temp = 0;
2847
2848 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
2849 if (ret < 0)
2850 return ret;
2851
2852 *temp = (ret & 0xff) - 25;
2853
2854 return 0;
2855}
2856
2857int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
2858{
2859 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
2860 return mv88e63xx_get_temp(ds, temp);
2861
2862 return mv88e61xx_get_temp(ds, temp);
2863}
2864
2865int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
2866{
2867 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2868 int ret;
2869
2870 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2871 return -EOPNOTSUPP;
2872
2873 *temp = 0;
2874
2875 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2876 if (ret < 0)
2877 return ret;
2878
2879 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
2880
2881 return 0;
2882}
2883
2884int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
2885{
2886 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2887 int ret;
2888
2889 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2890 return -EOPNOTSUPP;
2891
2892 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2893 if (ret < 0)
2894 return ret;
2895 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
2896 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
2897 (ret & 0xe0ff) | (temp << 8));
2898}
2899
2900int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
2901{
2902 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2903 int ret;
2904
2905 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2906 return -EOPNOTSUPP;
2907
2908 *alarm = false;
2909
2910 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2911 if (ret < 0)
2912 return ret;
2913
2914 *alarm = !!(ret & 0x40);
2915
2916 return 0;
2917}
2918#endif /* CONFIG_NET_DSA_HWMON */
2919
Ben Hutchings98e67302011-11-25 14:36:19 +00002920static int __init mv88e6xxx_init(void)
2921{
2922#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2923 register_switch_driver(&mv88e6131_switch_driver);
2924#endif
2925#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2926 register_switch_driver(&mv88e6123_61_65_switch_driver);
2927#endif
Guenter Roeck3ad50cc2014-10-29 10:44:56 -07002928#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2929 register_switch_driver(&mv88e6352_switch_driver);
2930#endif
Andrew Lunn42f27252014-09-12 23:58:44 +02002931#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2932 register_switch_driver(&mv88e6171_switch_driver);
2933#endif
Ben Hutchings98e67302011-11-25 14:36:19 +00002934 return 0;
2935}
2936module_init(mv88e6xxx_init);
2937
2938static void __exit mv88e6xxx_cleanup(void)
2939{
Andrew Lunn42f27252014-09-12 23:58:44 +02002940#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2941 unregister_switch_driver(&mv88e6171_switch_driver);
2942#endif
Vivien Didelot4212b5432015-05-01 10:43:52 -04002943#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2944 unregister_switch_driver(&mv88e6352_switch_driver);
2945#endif
Ben Hutchings98e67302011-11-25 14:36:19 +00002946#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2947 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
2948#endif
2949#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2950 unregister_switch_driver(&mv88e6131_switch_driver);
2951#endif
2952}
2953module_exit(mv88e6xxx_cleanup);
Ben Hutchings3d825ed2011-11-25 14:37:16 +00002954
2955MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
2956MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
2957MODULE_LICENSE("GPL");