blob: 1a8c45f3e68057d1b9c8f8044c618bc9b68f7425 [file] [log] [blame]
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00001/*
2 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
3 * Copyright (c) 2008 Marvell Semiconductor
4 *
Vivien Didelotb8fee952015-08-13 12:52:19 -04005 * Copyright (c) 2015 CMC Electronics, Inc.
6 * Added support for VLAN Table Unit operations
7 *
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00008 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
Andrew Lunn87c8cef2015-06-20 18:42:28 +020014#include <linux/debugfs.h>
Barry Grussling19b2f972013-01-08 16:05:54 +000015#include <linux/delay.h>
Guenter Roeckdefb05b2015-03-26 18:36:38 -070016#include <linux/etherdevice.h>
Andrew Lunndea87022015-08-31 15:56:47 +020017#include <linux/ethtool.h>
Guenter Roeckfacd95b2015-03-26 18:36:35 -070018#include <linux/if_bridge.h>
Barry Grussling19b2f972013-01-08 16:05:54 +000019#include <linux/jiffies.h>
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000020#include <linux/list.h>
Paul Gortmaker2bbba272012-01-24 10:41:40 +000021#include <linux/module.h>
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000022#include <linux/netdevice.h>
23#include <linux/phy.h>
Andrew Lunn87c8cef2015-06-20 18:42:28 +020024#include <linux/seq_file.h>
Ben Hutchingsc8f0b862011-11-27 17:06:08 +000025#include <net/dsa.h>
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000026#include "mv88e6xxx.h"
27
Andrew Lunn16fe24f2015-05-06 01:09:55 +020028/* MDIO bus access can be nested in the case of PHYs connected to the
29 * internal MDIO bus of the switch, which is accessed via MDIO bus of
30 * the Ethernet interface. Avoid lockdep false positives by using
31 * mutex_lock_nested().
32 */
33static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
34{
35 int ret;
36
37 mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
38 ret = bus->read(bus, addr, regnum);
39 mutex_unlock(&bus->mdio_lock);
40
41 return ret;
42}
43
44static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
45 u16 val)
46{
47 int ret;
48
49 mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
50 ret = bus->write(bus, addr, regnum, val);
51 mutex_unlock(&bus->mdio_lock);
52
53 return ret;
54}
55
Barry Grussling3675c8d2013-01-08 16:05:53 +000056/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000057 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
58 * will be directly accessible on some {device address,register address}
59 * pair. If the ADDR[4:0] pins are not strapped to zero, the switch
60 * will only respond to SMI transactions to that specific address, and
61 * an indirect addressing mechanism needs to be used to access its
62 * registers.
63 */
64static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
65{
66 int ret;
67 int i;
68
69 for (i = 0; i < 16; i++) {
Andrew Lunn16fe24f2015-05-06 01:09:55 +020070 ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000071 if (ret < 0)
72 return ret;
73
Andrew Lunncca8b132015-04-02 04:06:39 +020074 if ((ret & SMI_CMD_BUSY) == 0)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000075 return 0;
76 }
77
78 return -ETIMEDOUT;
79}
80
81int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
82{
83 int ret;
84
85 if (sw_addr == 0)
Andrew Lunn16fe24f2015-05-06 01:09:55 +020086 return mv88e6xxx_mdiobus_read(bus, addr, reg);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000087
Barry Grussling3675c8d2013-01-08 16:05:53 +000088 /* Wait for the bus to become free. */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000089 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
90 if (ret < 0)
91 return ret;
92
Barry Grussling3675c8d2013-01-08 16:05:53 +000093 /* Transmit the read command. */
Andrew Lunn16fe24f2015-05-06 01:09:55 +020094 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
95 SMI_CMD_OP_22_READ | (addr << 5) | reg);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000096 if (ret < 0)
97 return ret;
98
Barry Grussling3675c8d2013-01-08 16:05:53 +000099 /* Wait for the read command to complete. */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000100 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
101 if (ret < 0)
102 return ret;
103
Barry Grussling3675c8d2013-01-08 16:05:53 +0000104 /* Read the data. */
Andrew Lunn16fe24f2015-05-06 01:09:55 +0200105 ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000106 if (ret < 0)
107 return ret;
108
109 return ret & 0xffff;
110}
111
Guenter Roeck8d6d09e2015-03-26 18:36:31 -0700112/* Must be called with SMI mutex held */
113static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000114{
Guenter Roeckb184e492014-10-17 12:30:58 -0700115 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000116 int ret;
117
Guenter Roeckb184e492014-10-17 12:30:58 -0700118 if (bus == NULL)
119 return -EINVAL;
120
Guenter Roeckb184e492014-10-17 12:30:58 -0700121 ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
Vivien Didelotbb92ea52015-01-23 16:10:36 -0500122 if (ret < 0)
123 return ret;
124
125 dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
126 addr, reg, ret);
127
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000128 return ret;
129}
130
Guenter Roeck8d6d09e2015-03-26 18:36:31 -0700131int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
132{
133 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
134 int ret;
135
136 mutex_lock(&ps->smi_mutex);
137 ret = _mv88e6xxx_reg_read(ds, addr, reg);
138 mutex_unlock(&ps->smi_mutex);
139
140 return ret;
141}
142
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000143int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
144 int reg, u16 val)
145{
146 int ret;
147
148 if (sw_addr == 0)
Andrew Lunn16fe24f2015-05-06 01:09:55 +0200149 return mv88e6xxx_mdiobus_write(bus, addr, reg, val);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000150
Barry Grussling3675c8d2013-01-08 16:05:53 +0000151 /* Wait for the bus to become free. */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000152 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
153 if (ret < 0)
154 return ret;
155
Barry Grussling3675c8d2013-01-08 16:05:53 +0000156 /* Transmit the data to write. */
Andrew Lunn16fe24f2015-05-06 01:09:55 +0200157 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000158 if (ret < 0)
159 return ret;
160
Barry Grussling3675c8d2013-01-08 16:05:53 +0000161 /* Transmit the write command. */
Andrew Lunn16fe24f2015-05-06 01:09:55 +0200162 ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
163 SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000164 if (ret < 0)
165 return ret;
166
Barry Grussling3675c8d2013-01-08 16:05:53 +0000167 /* Wait for the write command to complete. */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000168 ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
169 if (ret < 0)
170 return ret;
171
172 return 0;
173}
174
Guenter Roeck8d6d09e2015-03-26 18:36:31 -0700175/* Must be called with SMI mutex held */
176static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
177 u16 val)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000178{
Guenter Roeckb184e492014-10-17 12:30:58 -0700179 struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000180
Guenter Roeckb184e492014-10-17 12:30:58 -0700181 if (bus == NULL)
182 return -EINVAL;
183
Vivien Didelotbb92ea52015-01-23 16:10:36 -0500184 dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
185 addr, reg, val);
186
Guenter Roeck8d6d09e2015-03-26 18:36:31 -0700187 return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
188}
189
190int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
191{
192 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
193 int ret;
194
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000195 mutex_lock(&ps->smi_mutex);
Guenter Roeck8d6d09e2015-03-26 18:36:31 -0700196 ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000197 mutex_unlock(&ps->smi_mutex);
198
199 return ret;
200}
201
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000202int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
203{
Andrew Lunncca8b132015-04-02 04:06:39 +0200204 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
205 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
206 REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000207
208 return 0;
209}
210
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000211int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
212{
213 int i;
214 int ret;
215
216 for (i = 0; i < 6; i++) {
217 int j;
218
Barry Grussling3675c8d2013-01-08 16:05:53 +0000219 /* Write the MAC address byte. */
Andrew Lunncca8b132015-04-02 04:06:39 +0200220 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
221 GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000222
Barry Grussling3675c8d2013-01-08 16:05:53 +0000223 /* Wait for the write to complete. */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000224 for (j = 0; j < 16; j++) {
Andrew Lunncca8b132015-04-02 04:06:39 +0200225 ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
226 if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000227 break;
228 }
229 if (j == 16)
230 return -ETIMEDOUT;
231 }
232
233 return 0;
234}
235
Andrew Lunn3898c142015-05-06 01:09:53 +0200236/* Must be called with SMI mutex held */
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +0200237static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000238{
239 if (addr >= 0)
Andrew Lunn3898c142015-05-06 01:09:53 +0200240 return _mv88e6xxx_reg_read(ds, addr, regnum);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000241 return 0xffff;
242}
243
Andrew Lunn3898c142015-05-06 01:09:53 +0200244/* Must be called with SMI mutex held */
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +0200245static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
246 u16 val)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000247{
248 if (addr >= 0)
Andrew Lunn3898c142015-05-06 01:09:53 +0200249 return _mv88e6xxx_reg_write(ds, addr, regnum, val);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000250 return 0;
251}
252
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000253#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
254static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
255{
256 int ret;
Barry Grussling19b2f972013-01-08 16:05:54 +0000257 unsigned long timeout;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000258
Andrew Lunncca8b132015-04-02 04:06:39 +0200259 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
260 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
261 ret & ~GLOBAL_CONTROL_PPU_ENABLE);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000262
Barry Grussling19b2f972013-01-08 16:05:54 +0000263 timeout = jiffies + 1 * HZ;
264 while (time_before(jiffies, timeout)) {
Andrew Lunncca8b132015-04-02 04:06:39 +0200265 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
Barry Grussling19b2f972013-01-08 16:05:54 +0000266 usleep_range(1000, 2000);
Andrew Lunncca8b132015-04-02 04:06:39 +0200267 if ((ret & GLOBAL_STATUS_PPU_MASK) !=
268 GLOBAL_STATUS_PPU_POLLING)
Barry Grussling85686582013-01-08 16:05:56 +0000269 return 0;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000270 }
271
272 return -ETIMEDOUT;
273}
274
275static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
276{
277 int ret;
Barry Grussling19b2f972013-01-08 16:05:54 +0000278 unsigned long timeout;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000279
Andrew Lunncca8b132015-04-02 04:06:39 +0200280 ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
281 REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000282
Barry Grussling19b2f972013-01-08 16:05:54 +0000283 timeout = jiffies + 1 * HZ;
284 while (time_before(jiffies, timeout)) {
Andrew Lunncca8b132015-04-02 04:06:39 +0200285 ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
Barry Grussling19b2f972013-01-08 16:05:54 +0000286 usleep_range(1000, 2000);
Andrew Lunncca8b132015-04-02 04:06:39 +0200287 if ((ret & GLOBAL_STATUS_PPU_MASK) ==
288 GLOBAL_STATUS_PPU_POLLING)
Barry Grussling85686582013-01-08 16:05:56 +0000289 return 0;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000290 }
291
292 return -ETIMEDOUT;
293}
294
295static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
296{
297 struct mv88e6xxx_priv_state *ps;
298
299 ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
300 if (mutex_trylock(&ps->ppu_mutex)) {
Barry Grussling85686582013-01-08 16:05:56 +0000301 struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000302
Barry Grussling85686582013-01-08 16:05:56 +0000303 if (mv88e6xxx_ppu_enable(ds) == 0)
304 ps->ppu_disabled = 0;
305 mutex_unlock(&ps->ppu_mutex);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000306 }
307}
308
309static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
310{
311 struct mv88e6xxx_priv_state *ps = (void *)_ps;
312
313 schedule_work(&ps->ppu_work);
314}
315
316static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
317{
Florian Fainellia22adce2014-04-28 11:14:28 -0700318 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000319 int ret;
320
321 mutex_lock(&ps->ppu_mutex);
322
Barry Grussling3675c8d2013-01-08 16:05:53 +0000323 /* If the PHY polling unit is enabled, disable it so that
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000324 * we can access the PHY registers. If it was already
325 * disabled, cancel the timer that is going to re-enable
326 * it.
327 */
328 if (!ps->ppu_disabled) {
Barry Grussling85686582013-01-08 16:05:56 +0000329 ret = mv88e6xxx_ppu_disable(ds);
330 if (ret < 0) {
331 mutex_unlock(&ps->ppu_mutex);
332 return ret;
333 }
334 ps->ppu_disabled = 1;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000335 } else {
Barry Grussling85686582013-01-08 16:05:56 +0000336 del_timer(&ps->ppu_timer);
337 ret = 0;
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000338 }
339
340 return ret;
341}
342
343static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
344{
Florian Fainellia22adce2014-04-28 11:14:28 -0700345 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000346
Barry Grussling3675c8d2013-01-08 16:05:53 +0000347 /* Schedule a timer to re-enable the PHY polling unit. */
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000348 mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
349 mutex_unlock(&ps->ppu_mutex);
350}
351
352void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
353{
Florian Fainellia22adce2014-04-28 11:14:28 -0700354 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000355
356 mutex_init(&ps->ppu_mutex);
357 INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
358 init_timer(&ps->ppu_timer);
359 ps->ppu_timer.data = (unsigned long)ps;
360 ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
361}
362
363int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
364{
365 int ret;
366
367 ret = mv88e6xxx_ppu_access_get(ds);
368 if (ret >= 0) {
Barry Grussling85686582013-01-08 16:05:56 +0000369 ret = mv88e6xxx_reg_read(ds, addr, regnum);
370 mv88e6xxx_ppu_access_put(ds);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000371 }
372
373 return ret;
374}
375
376int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
377 int regnum, u16 val)
378{
379 int ret;
380
381 ret = mv88e6xxx_ppu_access_get(ds);
382 if (ret >= 0) {
Barry Grussling85686582013-01-08 16:05:56 +0000383 ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
384 mv88e6xxx_ppu_access_put(ds);
Lennert Buytenhek2e5f0322008-10-07 13:45:18 +0000385 }
386
387 return ret;
388}
389#endif
390
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000391void mv88e6xxx_poll_link(struct dsa_switch *ds)
392{
393 int i;
394
395 for (i = 0; i < DSA_MAX_PORTS; i++) {
396 struct net_device *dev;
Ingo Molnar2a9e7972008-11-25 16:50:49 -0800397 int uninitialized_var(port_status);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000398 int link;
399 int speed;
400 int duplex;
401 int fc;
402
403 dev = ds->ports[i];
404 if (dev == NULL)
405 continue;
406
407 link = 0;
408 if (dev->flags & IFF_UP) {
Andrew Lunncca8b132015-04-02 04:06:39 +0200409 port_status = mv88e6xxx_reg_read(ds, REG_PORT(i),
410 PORT_STATUS);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000411 if (port_status < 0)
412 continue;
413
Andrew Lunncca8b132015-04-02 04:06:39 +0200414 link = !!(port_status & PORT_STATUS_LINK);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000415 }
416
417 if (!link) {
418 if (netif_carrier_ok(dev)) {
Barry Grusslingab381a92013-01-08 16:05:55 +0000419 netdev_info(dev, "link down\n");
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000420 netif_carrier_off(dev);
421 }
422 continue;
423 }
424
Andrew Lunncca8b132015-04-02 04:06:39 +0200425 switch (port_status & PORT_STATUS_SPEED_MASK) {
426 case PORT_STATUS_SPEED_10:
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000427 speed = 10;
428 break;
Andrew Lunncca8b132015-04-02 04:06:39 +0200429 case PORT_STATUS_SPEED_100:
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000430 speed = 100;
431 break;
Andrew Lunncca8b132015-04-02 04:06:39 +0200432 case PORT_STATUS_SPEED_1000:
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000433 speed = 1000;
434 break;
435 default:
436 speed = -1;
437 break;
438 }
Andrew Lunncca8b132015-04-02 04:06:39 +0200439 duplex = (port_status & PORT_STATUS_DUPLEX) ? 1 : 0;
440 fc = (port_status & PORT_STATUS_PAUSE_EN) ? 1 : 0;
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000441
442 if (!netif_carrier_ok(dev)) {
Barry Grusslingab381a92013-01-08 16:05:55 +0000443 netdev_info(dev,
444 "link up, %d Mb/s, %s duplex, flow control %sabled\n",
445 speed,
446 duplex ? "full" : "half",
447 fc ? "en" : "dis");
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000448 netif_carrier_on(dev);
449 }
450 }
451}
452
Andrew Lunn54d792f2015-05-06 01:09:47 +0200453static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
454{
455 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
456
457 switch (ps->id) {
458 case PORT_SWITCH_ID_6031:
459 case PORT_SWITCH_ID_6061:
460 case PORT_SWITCH_ID_6035:
461 case PORT_SWITCH_ID_6065:
462 return true;
463 }
464 return false;
465}
466
467static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
468{
469 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
470
471 switch (ps->id) {
472 case PORT_SWITCH_ID_6092:
473 case PORT_SWITCH_ID_6095:
474 return true;
475 }
476 return false;
477}
478
479static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
480{
481 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
482
483 switch (ps->id) {
484 case PORT_SWITCH_ID_6046:
485 case PORT_SWITCH_ID_6085:
486 case PORT_SWITCH_ID_6096:
487 case PORT_SWITCH_ID_6097:
488 return true;
489 }
490 return false;
491}
492
493static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
494{
495 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
496
497 switch (ps->id) {
498 case PORT_SWITCH_ID_6123:
499 case PORT_SWITCH_ID_6161:
500 case PORT_SWITCH_ID_6165:
501 return true;
502 }
503 return false;
504}
505
506static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
507{
508 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
509
510 switch (ps->id) {
511 case PORT_SWITCH_ID_6121:
512 case PORT_SWITCH_ID_6122:
513 case PORT_SWITCH_ID_6152:
514 case PORT_SWITCH_ID_6155:
515 case PORT_SWITCH_ID_6182:
516 case PORT_SWITCH_ID_6185:
517 case PORT_SWITCH_ID_6108:
518 case PORT_SWITCH_ID_6131:
519 return true;
520 }
521 return false;
522}
523
Guenter Roeckc22995c2015-07-25 09:42:28 -0700524static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -0700525{
526 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
527
528 switch (ps->id) {
529 case PORT_SWITCH_ID_6320:
530 case PORT_SWITCH_ID_6321:
531 return true;
532 }
533 return false;
534}
535
Andrew Lunn54d792f2015-05-06 01:09:47 +0200536static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
537{
538 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
539
540 switch (ps->id) {
541 case PORT_SWITCH_ID_6171:
542 case PORT_SWITCH_ID_6175:
543 case PORT_SWITCH_ID_6350:
544 case PORT_SWITCH_ID_6351:
545 return true;
546 }
547 return false;
548}
549
Andrew Lunnf3a8b6b2015-04-02 04:06:40 +0200550static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
551{
552 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
553
554 switch (ps->id) {
Andrew Lunnf3a8b6b2015-04-02 04:06:40 +0200555 case PORT_SWITCH_ID_6172:
556 case PORT_SWITCH_ID_6176:
Andrew Lunn54d792f2015-05-06 01:09:47 +0200557 case PORT_SWITCH_ID_6240:
558 case PORT_SWITCH_ID_6352:
Andrew Lunnf3a8b6b2015-04-02 04:06:40 +0200559 return true;
560 }
561 return false;
562}
563
Andrew Lunndea87022015-08-31 15:56:47 +0200564/* We expect the switch to perform auto negotiation if there is a real
565 * phy. However, in the case of a fixed link phy, we force the port
566 * settings from the fixed link settings.
567 */
568void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
569 struct phy_device *phydev)
570{
571 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
572 u32 ret, reg;
573
574 if (!phy_is_pseudo_fixed_link(phydev))
575 return;
576
577 mutex_lock(&ps->smi_mutex);
578
579 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
580 if (ret < 0)
581 goto out;
582
583 reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
584 PORT_PCS_CTRL_FORCE_LINK |
585 PORT_PCS_CTRL_DUPLEX_FULL |
586 PORT_PCS_CTRL_FORCE_DUPLEX |
587 PORT_PCS_CTRL_UNFORCED);
588
589 reg |= PORT_PCS_CTRL_FORCE_LINK;
590 if (phydev->link)
591 reg |= PORT_PCS_CTRL_LINK_UP;
592
593 if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
594 goto out;
595
596 switch (phydev->speed) {
597 case SPEED_1000:
598 reg |= PORT_PCS_CTRL_1000;
599 break;
600 case SPEED_100:
601 reg |= PORT_PCS_CTRL_100;
602 break;
603 case SPEED_10:
604 reg |= PORT_PCS_CTRL_10;
605 break;
606 default:
607 pr_info("Unknown speed");
608 goto out;
609 }
610
611 reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
612 if (phydev->duplex == DUPLEX_FULL)
613 reg |= PORT_PCS_CTRL_DUPLEX_FULL;
614
615 _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);
616
617out:
618 mutex_unlock(&ps->smi_mutex);
619}
620
Andrew Lunn31888232015-05-06 01:09:54 +0200621/* Must be called with SMI mutex held */
622static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000623{
624 int ret;
625 int i;
626
627 for (i = 0; i < 10; i++) {
Andrew Lunn31888232015-05-06 01:09:54 +0200628 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
Andrew Lunncca8b132015-04-02 04:06:39 +0200629 if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000630 return 0;
631 }
632
633 return -ETIMEDOUT;
634}
635
Andrew Lunn31888232015-05-06 01:09:54 +0200636/* Must be called with SMI mutex held */
637static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000638{
639 int ret;
640
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -0700641 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
Andrew Lunnf3a8b6b2015-04-02 04:06:40 +0200642 port = (port + 1) << 5;
643
Barry Grussling3675c8d2013-01-08 16:05:53 +0000644 /* Snapshot the hardware statistics counters for this port. */
Andrew Lunn31888232015-05-06 01:09:54 +0200645 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
646 GLOBAL_STATS_OP_CAPTURE_PORT |
647 GLOBAL_STATS_OP_HIST_RX_TX | port);
648 if (ret < 0)
649 return ret;
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000650
Barry Grussling3675c8d2013-01-08 16:05:53 +0000651 /* Wait for the snapshotting to complete. */
Andrew Lunn31888232015-05-06 01:09:54 +0200652 ret = _mv88e6xxx_stats_wait(ds);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000653 if (ret < 0)
654 return ret;
655
656 return 0;
657}
658
Andrew Lunn31888232015-05-06 01:09:54 +0200659/* Must be called with SMI mutex held */
660static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000661{
662 u32 _val;
663 int ret;
664
665 *val = 0;
666
Andrew Lunn31888232015-05-06 01:09:54 +0200667 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
668 GLOBAL_STATS_OP_READ_CAPTURED |
669 GLOBAL_STATS_OP_HIST_RX_TX | stat);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000670 if (ret < 0)
671 return;
672
Andrew Lunn31888232015-05-06 01:09:54 +0200673 ret = _mv88e6xxx_stats_wait(ds);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000674 if (ret < 0)
675 return;
676
Andrew Lunn31888232015-05-06 01:09:54 +0200677 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000678 if (ret < 0)
679 return;
680
681 _val = ret << 16;
682
Andrew Lunn31888232015-05-06 01:09:54 +0200683 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000684 if (ret < 0)
685 return;
686
687 *val = _val | ret;
688}
689
Andrew Lunne413e7e2015-04-02 04:06:38 +0200690static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
691 { "in_good_octets", 8, 0x00, },
692 { "in_bad_octets", 4, 0x02, },
693 { "in_unicast", 4, 0x04, },
694 { "in_broadcasts", 4, 0x06, },
695 { "in_multicasts", 4, 0x07, },
696 { "in_pause", 4, 0x16, },
697 { "in_undersize", 4, 0x18, },
698 { "in_fragments", 4, 0x19, },
699 { "in_oversize", 4, 0x1a, },
700 { "in_jabber", 4, 0x1b, },
701 { "in_rx_error", 4, 0x1c, },
702 { "in_fcs_error", 4, 0x1d, },
703 { "out_octets", 8, 0x0e, },
704 { "out_unicast", 4, 0x10, },
705 { "out_broadcasts", 4, 0x13, },
706 { "out_multicasts", 4, 0x12, },
707 { "out_pause", 4, 0x15, },
708 { "excessive", 4, 0x11, },
709 { "collisions", 4, 0x1e, },
710 { "deferred", 4, 0x05, },
711 { "single", 4, 0x14, },
712 { "multiple", 4, 0x17, },
713 { "out_fcs_error", 4, 0x03, },
714 { "late", 4, 0x1f, },
715 { "hist_64bytes", 4, 0x08, },
716 { "hist_65_127bytes", 4, 0x09, },
717 { "hist_128_255bytes", 4, 0x0a, },
718 { "hist_256_511bytes", 4, 0x0b, },
719 { "hist_512_1023bytes", 4, 0x0c, },
720 { "hist_1024_max_bytes", 4, 0x0d, },
721 /* Not all devices have the following counters */
722 { "sw_in_discards", 4, 0x110, },
723 { "sw_in_filtered", 2, 0x112, },
724 { "sw_out_filtered", 2, 0x113, },
725
726};
727
728static bool have_sw_in_discards(struct dsa_switch *ds)
729{
730 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
731
732 switch (ps->id) {
Andrew Lunncca8b132015-04-02 04:06:39 +0200733 case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
734 case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
735 case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
736 case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
737 case PORT_SWITCH_ID_6352:
Andrew Lunne413e7e2015-04-02 04:06:38 +0200738 return true;
739 default:
740 return false;
741 }
742}
743
744static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
745 int nr_stats,
746 struct mv88e6xxx_hw_stat *stats,
747 int port, uint8_t *data)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000748{
749 int i;
750
751 for (i = 0; i < nr_stats; i++) {
752 memcpy(data + i * ETH_GSTRING_LEN,
753 stats[i].string, ETH_GSTRING_LEN);
754 }
755}
756
Andrew Lunn80c46272015-06-20 18:42:30 +0200757static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
758 int stat,
759 struct mv88e6xxx_hw_stat *stats,
760 int port)
761{
762 struct mv88e6xxx_hw_stat *s = stats + stat;
763 u32 low;
764 u32 high = 0;
765 int ret;
766 u64 value;
767
768 if (s->reg >= 0x100) {
769 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
770 s->reg - 0x100);
771 if (ret < 0)
772 return UINT64_MAX;
773
774 low = ret;
775 if (s->sizeof_stat == 4) {
776 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
777 s->reg - 0x100 + 1);
778 if (ret < 0)
779 return UINT64_MAX;
780 high = ret;
781 }
782 } else {
783 _mv88e6xxx_stats_read(ds, s->reg, &low);
784 if (s->sizeof_stat == 8)
785 _mv88e6xxx_stats_read(ds, s->reg + 1, &high);
786 }
787 value = (((u64)high) << 16) | low;
788 return value;
789}
790
Andrew Lunne413e7e2015-04-02 04:06:38 +0200791static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
792 int nr_stats,
793 struct mv88e6xxx_hw_stat *stats,
794 int port, uint64_t *data)
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000795{
Florian Fainellia22adce2014-04-28 11:14:28 -0700796 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000797 int ret;
798 int i;
799
Andrew Lunn31888232015-05-06 01:09:54 +0200800 mutex_lock(&ps->smi_mutex);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000801
Andrew Lunn31888232015-05-06 01:09:54 +0200802 ret = _mv88e6xxx_stats_snapshot(ds, port);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000803 if (ret < 0) {
Andrew Lunn31888232015-05-06 01:09:54 +0200804 mutex_unlock(&ps->smi_mutex);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000805 return;
806 }
807
Barry Grussling3675c8d2013-01-08 16:05:53 +0000808 /* Read each of the counters. */
Andrew Lunn80c46272015-06-20 18:42:30 +0200809 for (i = 0; i < nr_stats; i++)
810 data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000811
Andrew Lunn31888232015-05-06 01:09:54 +0200812 mutex_unlock(&ps->smi_mutex);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000813}
Ben Hutchings98e67302011-11-25 14:36:19 +0000814
Andrew Lunne413e7e2015-04-02 04:06:38 +0200815/* All the statistics in the table */
816void
817mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
818{
819 if (have_sw_in_discards(ds))
820 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
821 mv88e6xxx_hw_stats, port, data);
822 else
823 _mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
824 mv88e6xxx_hw_stats, port, data);
825}
826
827int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
828{
829 if (have_sw_in_discards(ds))
830 return ARRAY_SIZE(mv88e6xxx_hw_stats);
831 return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
832}
833
834void
835mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
836 int port, uint64_t *data)
837{
838 if (have_sw_in_discards(ds))
839 _mv88e6xxx_get_ethtool_stats(
840 ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
841 mv88e6xxx_hw_stats, port, data);
842 else
843 _mv88e6xxx_get_ethtool_stats(
844 ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
845 mv88e6xxx_hw_stats, port, data);
846}
847
Guenter Roecka1ab91f2014-10-29 10:45:05 -0700848int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
849{
850 return 32 * sizeof(u16);
851}
852
853void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
854 struct ethtool_regs *regs, void *_p)
855{
856 u16 *p = _p;
857 int i;
858
859 regs->version = 0;
860
861 memset(p, 0xff, 32 * sizeof(u16));
862
863 for (i = 0; i < 32; i++) {
864 int ret;
865
866 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
867 if (ret >= 0)
868 p[i] = ret;
869 }
870}
871
Guenter Roeckfacd95b2015-03-26 18:36:35 -0700872/* Must be called with SMI lock held */
Andrew Lunn3898c142015-05-06 01:09:53 +0200873static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
874 u16 mask)
Guenter Roeckfacd95b2015-03-26 18:36:35 -0700875{
876 unsigned long timeout = jiffies + HZ / 10;
877
878 while (time_before(jiffies, timeout)) {
879 int ret;
880
881 ret = _mv88e6xxx_reg_read(ds, reg, offset);
882 if (ret < 0)
883 return ret;
884 if (!(ret & mask))
885 return 0;
886
887 usleep_range(1000, 2000);
888 }
889 return -ETIMEDOUT;
890}
891
Andrew Lunn3898c142015-05-06 01:09:53 +0200892static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
893{
894 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
895 int ret;
896
897 mutex_lock(&ps->smi_mutex);
898 ret = _mv88e6xxx_wait(ds, reg, offset, mask);
899 mutex_unlock(&ps->smi_mutex);
900
901 return ret;
902}
903
904static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
905{
906 return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
907 GLOBAL2_SMI_OP_BUSY);
908}
909
910int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
911{
912 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
913 GLOBAL2_EEPROM_OP_LOAD);
914}
915
916int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
917{
918 return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
919 GLOBAL2_EEPROM_OP_BUSY);
920}
921
Guenter Roeckfacd95b2015-03-26 18:36:35 -0700922/* Must be called with SMI lock held */
923static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
924{
Andrew Lunncca8b132015-04-02 04:06:39 +0200925 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
926 GLOBAL_ATU_OP_BUSY);
Guenter Roeckfacd95b2015-03-26 18:36:35 -0700927}
928
Andrew Lunn56d95e22015-06-20 18:42:33 +0200929/* Must be called with SMI lock held */
930static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
931{
932 return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
933 GLOBAL2_SCRATCH_BUSY);
934}
935
Andrew Lunn3898c142015-05-06 01:09:53 +0200936/* Must be called with SMI mutex held */
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +0200937static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
938 int regnum)
Andrew Lunnf3044682015-02-14 19:17:50 +0100939{
940 int ret;
941
Andrew Lunn3898c142015-05-06 01:09:53 +0200942 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
943 GLOBAL2_SMI_OP_22_READ | (addr << 5) |
944 regnum);
Andrew Lunnf3044682015-02-14 19:17:50 +0100945 if (ret < 0)
946 return ret;
947
Andrew Lunn3898c142015-05-06 01:09:53 +0200948 ret = _mv88e6xxx_phy_wait(ds);
949 if (ret < 0)
950 return ret;
951
952 return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
Andrew Lunnf3044682015-02-14 19:17:50 +0100953}
954
Andrew Lunn3898c142015-05-06 01:09:53 +0200955/* Must be called with SMI mutex held */
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +0200956static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
957 int regnum, u16 val)
Andrew Lunnf3044682015-02-14 19:17:50 +0100958{
Andrew Lunn3898c142015-05-06 01:09:53 +0200959 int ret;
Andrew Lunnf3044682015-02-14 19:17:50 +0100960
Andrew Lunn3898c142015-05-06 01:09:53 +0200961 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
962 if (ret < 0)
963 return ret;
964
965 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
966 GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
967 regnum);
968
969 return _mv88e6xxx_phy_wait(ds);
Andrew Lunnf3044682015-02-14 19:17:50 +0100970}
971
Guenter Roeck11b3b452015-03-06 22:23:51 -0800972int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
973{
Andrew Lunn2f40c692015-04-02 04:06:37 +0200974 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Guenter Roeck11b3b452015-03-06 22:23:51 -0800975 int reg;
976
Andrew Lunn3898c142015-05-06 01:09:53 +0200977 mutex_lock(&ps->smi_mutex);
Andrew Lunn2f40c692015-04-02 04:06:37 +0200978
979 reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
Guenter Roeck11b3b452015-03-06 22:23:51 -0800980 if (reg < 0)
Andrew Lunn2f40c692015-04-02 04:06:37 +0200981 goto out;
Guenter Roeck11b3b452015-03-06 22:23:51 -0800982
983 e->eee_enabled = !!(reg & 0x0200);
984 e->tx_lpi_enabled = !!(reg & 0x0100);
985
Andrew Lunn3898c142015-05-06 01:09:53 +0200986 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
Guenter Roeck11b3b452015-03-06 22:23:51 -0800987 if (reg < 0)
Andrew Lunn2f40c692015-04-02 04:06:37 +0200988 goto out;
Guenter Roeck11b3b452015-03-06 22:23:51 -0800989
Andrew Lunncca8b132015-04-02 04:06:39 +0200990 e->eee_active = !!(reg & PORT_STATUS_EEE);
Andrew Lunn2f40c692015-04-02 04:06:37 +0200991 reg = 0;
Guenter Roeck11b3b452015-03-06 22:23:51 -0800992
Andrew Lunn2f40c692015-04-02 04:06:37 +0200993out:
Andrew Lunn3898c142015-05-06 01:09:53 +0200994 mutex_unlock(&ps->smi_mutex);
Andrew Lunn2f40c692015-04-02 04:06:37 +0200995 return reg;
Guenter Roeck11b3b452015-03-06 22:23:51 -0800996}
997
998int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
999 struct phy_device *phydev, struct ethtool_eee *e)
1000{
Andrew Lunn2f40c692015-04-02 04:06:37 +02001001 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1002 int reg;
Guenter Roeck11b3b452015-03-06 22:23:51 -08001003 int ret;
1004
Andrew Lunn3898c142015-05-06 01:09:53 +02001005 mutex_lock(&ps->smi_mutex);
Guenter Roeck11b3b452015-03-06 22:23:51 -08001006
Andrew Lunn2f40c692015-04-02 04:06:37 +02001007 ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
1008 if (ret < 0)
1009 goto out;
1010
1011 reg = ret & ~0x0300;
1012 if (e->eee_enabled)
1013 reg |= 0x0200;
1014 if (e->tx_lpi_enabled)
1015 reg |= 0x0100;
1016
1017 ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
1018out:
Andrew Lunn3898c142015-05-06 01:09:53 +02001019 mutex_unlock(&ps->smi_mutex);
Andrew Lunn2f40c692015-04-02 04:06:37 +02001020
1021 return ret;
Guenter Roeck11b3b452015-03-06 22:23:51 -08001022}
1023
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001024static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, int fid, u16 cmd)
1025{
1026 int ret;
1027
Vivien Didelota08df0f2015-08-10 09:09:46 -04001028 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001029 if (ret < 0)
1030 return ret;
1031
Andrew Lunncca8b132015-04-02 04:06:39 +02001032 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001033 if (ret < 0)
1034 return ret;
1035
1036 return _mv88e6xxx_atu_wait(ds);
1037}
1038
1039static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
1040{
1041 int ret;
1042
1043 ret = _mv88e6xxx_atu_wait(ds);
1044 if (ret < 0)
1045 return ret;
1046
Andrew Lunncca8b132015-04-02 04:06:39 +02001047 return _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_FLUSH_NON_STATIC_DB);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001048}
1049
1050static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
1051{
1052 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Geert Uytterhoevenc3ffe6d2015-04-16 20:49:14 +02001053 int reg, ret = 0;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001054 u8 oldstate;
1055
1056 mutex_lock(&ps->smi_mutex);
1057
Andrew Lunncca8b132015-04-02 04:06:39 +02001058 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
Guenter Roeck538cc282015-04-15 22:12:42 -07001059 if (reg < 0) {
1060 ret = reg;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001061 goto abort;
Guenter Roeck538cc282015-04-15 22:12:42 -07001062 }
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001063
Andrew Lunncca8b132015-04-02 04:06:39 +02001064 oldstate = reg & PORT_CONTROL_STATE_MASK;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001065 if (oldstate != state) {
1066 /* Flush forwarding database if we're moving a port
1067 * from Learning or Forwarding state to Disabled or
1068 * Blocking or Listening state.
1069 */
Andrew Lunncca8b132015-04-02 04:06:39 +02001070 if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
1071 state <= PORT_CONTROL_STATE_BLOCKING) {
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001072 ret = _mv88e6xxx_flush_fid(ds, ps->fid[port]);
1073 if (ret)
1074 goto abort;
1075 }
Andrew Lunncca8b132015-04-02 04:06:39 +02001076 reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
1077 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
1078 reg);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001079 }
1080
1081abort:
1082 mutex_unlock(&ps->smi_mutex);
1083 return ret;
1084}
1085
1086/* Must be called with smi lock held */
1087static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
1088{
1089 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1090 u8 fid = ps->fid[port];
1091 u16 reg = fid << 12;
1092
1093 if (dsa_is_cpu_port(ds, port))
1094 reg |= ds->phys_port_mask;
1095 else
1096 reg |= (ps->bridge_mask[fid] |
1097 (1 << dsa_upstream_port(ds))) & ~(1 << port);
1098
Andrew Lunncca8b132015-04-02 04:06:39 +02001099 return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001100}
1101
1102/* Must be called with smi lock held */
1103static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
1104{
1105 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1106 int port;
1107 u32 mask;
1108 int ret;
1109
1110 mask = ds->phys_port_mask;
1111 while (mask) {
1112 port = __ffs(mask);
1113 mask &= ~(1 << port);
1114 if (ps->fid[port] != fid)
1115 continue;
1116
1117 ret = _mv88e6xxx_update_port_config(ds, port);
1118 if (ret)
1119 return ret;
1120 }
1121
1122 return _mv88e6xxx_flush_fid(ds, fid);
1123}
1124
1125/* Bridge handling functions */
1126
1127int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1128{
1129 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1130 int ret = 0;
1131 u32 nmask;
1132 int fid;
1133
1134 /* If the bridge group is not empty, join that group.
1135 * Otherwise create a new group.
1136 */
1137 fid = ps->fid[port];
1138 nmask = br_port_mask & ~(1 << port);
1139 if (nmask)
1140 fid = ps->fid[__ffs(nmask)];
1141
1142 nmask = ps->bridge_mask[fid] | (1 << port);
1143 if (nmask != br_port_mask) {
1144 netdev_err(ds->ports[port],
1145 "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1146 fid, br_port_mask, nmask);
1147 return -EINVAL;
1148 }
1149
1150 mutex_lock(&ps->smi_mutex);
1151
1152 ps->bridge_mask[fid] = br_port_mask;
1153
1154 if (fid != ps->fid[port]) {
Vivien Didelot194fea72015-08-10 09:09:47 -04001155 clear_bit(ps->fid[port], ps->fid_bitmap);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001156 ps->fid[port] = fid;
1157 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1158 }
1159
1160 mutex_unlock(&ps->smi_mutex);
1161
1162 return ret;
1163}
1164
1165int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1166{
1167 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1168 u8 fid, newfid;
1169 int ret;
1170
1171 fid = ps->fid[port];
1172
1173 if (ps->bridge_mask[fid] != br_port_mask) {
1174 netdev_err(ds->ports[port],
1175 "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
1176 fid, br_port_mask, ps->bridge_mask[fid]);
1177 return -EINVAL;
1178 }
1179
1180 /* If the port was the last port of a bridge, we are done.
1181 * Otherwise assign a new fid to the port, and fix up
1182 * the bridge configuration.
1183 */
1184 if (br_port_mask == (1 << port))
1185 return 0;
1186
1187 mutex_lock(&ps->smi_mutex);
1188
Vivien Didelot194fea72015-08-10 09:09:47 -04001189 newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
1190 if (unlikely(newfid > ps->num_ports)) {
1191 netdev_err(ds->ports[port], "all first %d FIDs are used\n",
1192 ps->num_ports);
1193 ret = -ENOSPC;
1194 goto unlock;
1195 }
1196
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001197 ps->fid[port] = newfid;
Vivien Didelot194fea72015-08-10 09:09:47 -04001198 set_bit(newfid, ps->fid_bitmap);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001199 ps->bridge_mask[fid] &= ~(1 << port);
1200 ps->bridge_mask[newfid] = 1 << port;
1201
1202 ret = _mv88e6xxx_update_bridge_config(ds, fid);
1203 if (!ret)
1204 ret = _mv88e6xxx_update_bridge_config(ds, newfid);
1205
Vivien Didelot194fea72015-08-10 09:09:47 -04001206unlock:
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001207 mutex_unlock(&ps->smi_mutex);
1208
1209 return ret;
1210}
1211
1212int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
1213{
1214 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1215 int stp_state;
1216
1217 switch (state) {
1218 case BR_STATE_DISABLED:
Andrew Lunncca8b132015-04-02 04:06:39 +02001219 stp_state = PORT_CONTROL_STATE_DISABLED;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001220 break;
1221 case BR_STATE_BLOCKING:
1222 case BR_STATE_LISTENING:
Andrew Lunncca8b132015-04-02 04:06:39 +02001223 stp_state = PORT_CONTROL_STATE_BLOCKING;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001224 break;
1225 case BR_STATE_LEARNING:
Andrew Lunncca8b132015-04-02 04:06:39 +02001226 stp_state = PORT_CONTROL_STATE_LEARNING;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001227 break;
1228 case BR_STATE_FORWARDING:
1229 default:
Andrew Lunncca8b132015-04-02 04:06:39 +02001230 stp_state = PORT_CONTROL_STATE_FORWARDING;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001231 break;
1232 }
1233
1234 netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);
1235
1236 /* mv88e6xxx_port_stp_update may be called with softirqs disabled,
1237 * so we can not update the port state directly but need to schedule it.
1238 */
1239 ps->port_state[port] = stp_state;
1240 set_bit(port, &ps->port_state_update_mask);
1241 schedule_work(&ps->bridge_work);
1242
1243 return 0;
1244}
1245
Vivien Didelotb8fee952015-08-13 12:52:19 -04001246int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
1247{
1248 int ret;
1249
1250 ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
1251 if (ret < 0)
1252 return ret;
1253
1254 *pvid = ret & PORT_DEFAULT_VLAN_MASK;
1255
1256 return 0;
1257}
1258
Vivien Didelot0d3b33e2015-08-13 12:52:22 -04001259int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
1260{
1261 return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
1262 pvid & PORT_DEFAULT_VLAN_MASK);
1263}
1264
Vivien Didelot6b17e862015-08-13 12:52:18 -04001265static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
1266{
1267 return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
1268 GLOBAL_VTU_OP_BUSY);
1269}
1270
1271static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
1272{
1273 int ret;
1274
1275 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
1276 if (ret < 0)
1277 return ret;
1278
1279 return _mv88e6xxx_vtu_wait(ds);
1280}
1281
1282static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
1283{
1284 int ret;
1285
1286 ret = _mv88e6xxx_vtu_wait(ds);
1287 if (ret < 0)
1288 return ret;
1289
1290 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
1291}
1292
Vivien Didelotb8fee952015-08-13 12:52:19 -04001293static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
1294 struct mv88e6xxx_vtu_stu_entry *entry,
1295 unsigned int nibble_offset)
1296{
1297 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1298 u16 regs[3];
1299 int i;
1300 int ret;
1301
1302 for (i = 0; i < 3; ++i) {
1303 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1304 GLOBAL_VTU_DATA_0_3 + i);
1305 if (ret < 0)
1306 return ret;
1307
1308 regs[i] = ret;
1309 }
1310
1311 for (i = 0; i < ps->num_ports; ++i) {
1312 unsigned int shift = (i % 4) * 4 + nibble_offset;
1313 u16 reg = regs[i / 4];
1314
1315 entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
1316 }
1317
1318 return 0;
1319}
1320
Vivien Didelot7dad08d2015-08-13 12:52:21 -04001321static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
1322 struct mv88e6xxx_vtu_stu_entry *entry,
1323 unsigned int nibble_offset)
1324{
1325 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1326 u16 regs[3] = { 0 };
1327 int i;
1328 int ret;
1329
1330 for (i = 0; i < ps->num_ports; ++i) {
1331 unsigned int shift = (i % 4) * 4 + nibble_offset;
1332 u8 data = entry->data[i];
1333
1334 regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
1335 }
1336
1337 for (i = 0; i < 3; ++i) {
1338 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
1339 GLOBAL_VTU_DATA_0_3 + i, regs[i]);
1340 if (ret < 0)
1341 return ret;
1342 }
1343
1344 return 0;
1345}
1346
Vivien Didelotb8fee952015-08-13 12:52:19 -04001347static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
1348 struct mv88e6xxx_vtu_stu_entry *entry)
1349{
1350 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1351 int ret;
1352
1353 ret = _mv88e6xxx_vtu_wait(ds);
1354 if (ret < 0)
1355 return ret;
1356
1357 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
1358 vid & GLOBAL_VTU_VID_MASK);
1359 if (ret < 0)
1360 return ret;
1361
1362 ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
1363 if (ret < 0)
1364 return ret;
1365
1366 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1367 if (ret < 0)
1368 return ret;
1369
1370 next.vid = ret & GLOBAL_VTU_VID_MASK;
1371 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1372
1373 if (next.valid) {
1374 ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
1375 if (ret < 0)
1376 return ret;
1377
1378 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1379 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1380 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1381 GLOBAL_VTU_FID);
1382 if (ret < 0)
1383 return ret;
1384
1385 next.fid = ret & GLOBAL_VTU_FID_MASK;
1386
1387 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1388 GLOBAL_VTU_SID);
1389 if (ret < 0)
1390 return ret;
1391
1392 next.sid = ret & GLOBAL_VTU_SID_MASK;
1393 }
1394 }
1395
1396 *entry = next;
1397 return 0;
1398}
1399
Vivien Didelot7dad08d2015-08-13 12:52:21 -04001400static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
1401 struct mv88e6xxx_vtu_stu_entry *entry)
1402{
1403 u16 reg = 0;
1404 int ret;
1405
1406 ret = _mv88e6xxx_vtu_wait(ds);
1407 if (ret < 0)
1408 return ret;
1409
1410 if (!entry->valid)
1411 goto loadpurge;
1412
1413 /* Write port member tags */
1414 ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
1415 if (ret < 0)
1416 return ret;
1417
1418 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1419 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1420 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1421 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1422 if (ret < 0)
1423 return ret;
1424
1425 reg = entry->fid & GLOBAL_VTU_FID_MASK;
1426 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
1427 if (ret < 0)
1428 return ret;
1429 }
1430
1431 reg = GLOBAL_VTU_VID_VALID;
1432loadpurge:
1433 reg |= entry->vid & GLOBAL_VTU_VID_MASK;
1434 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1435 if (ret < 0)
1436 return ret;
1437
1438 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
1439}
1440
Vivien Didelot0d3b33e2015-08-13 12:52:22 -04001441static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
1442 struct mv88e6xxx_vtu_stu_entry *entry)
1443{
1444 struct mv88e6xxx_vtu_stu_entry next = { 0 };
1445 int ret;
1446
1447 ret = _mv88e6xxx_vtu_wait(ds);
1448 if (ret < 0)
1449 return ret;
1450
1451 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
1452 sid & GLOBAL_VTU_SID_MASK);
1453 if (ret < 0)
1454 return ret;
1455
1456 ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
1457 if (ret < 0)
1458 return ret;
1459
1460 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
1461 if (ret < 0)
1462 return ret;
1463
1464 next.sid = ret & GLOBAL_VTU_SID_MASK;
1465
1466 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
1467 if (ret < 0)
1468 return ret;
1469
1470 next.valid = !!(ret & GLOBAL_VTU_VID_VALID);
1471
1472 if (next.valid) {
1473 ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
1474 if (ret < 0)
1475 return ret;
1476 }
1477
1478 *entry = next;
1479 return 0;
1480}
1481
1482static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
1483 struct mv88e6xxx_vtu_stu_entry *entry)
1484{
1485 u16 reg = 0;
1486 int ret;
1487
1488 ret = _mv88e6xxx_vtu_wait(ds);
1489 if (ret < 0)
1490 return ret;
1491
1492 if (!entry->valid)
1493 goto loadpurge;
1494
1495 /* Write port states */
1496 ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
1497 if (ret < 0)
1498 return ret;
1499
1500 reg = GLOBAL_VTU_VID_VALID;
1501loadpurge:
1502 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
1503 if (ret < 0)
1504 return ret;
1505
1506 reg = entry->sid & GLOBAL_VTU_SID_MASK;
1507 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
1508 if (ret < 0)
1509 return ret;
1510
1511 return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
1512}
1513
1514static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
1515 struct mv88e6xxx_vtu_stu_entry *entry)
1516{
1517 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1518 struct mv88e6xxx_vtu_stu_entry vlan = {
1519 .valid = true,
1520 .vid = vid,
1521 };
1522 int i;
1523
1524 /* exclude all ports except the CPU */
1525 for (i = 0; i < ps->num_ports; ++i)
1526 vlan.data[i] = dsa_is_cpu_port(ds, i) ?
1527 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
1528 GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1529
1530 if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
1531 mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
1532 struct mv88e6xxx_vtu_stu_entry vstp;
1533 int err;
1534
1535 /* Adding a VTU entry requires a valid STU entry. As VSTP is not
1536 * implemented, only one STU entry is needed to cover all VTU
1537 * entries. Thus, validate the SID 0.
1538 */
1539 vlan.sid = 0;
1540 err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
1541 if (err)
1542 return err;
1543
1544 if (vstp.sid != vlan.sid || !vstp.valid) {
1545 memset(&vstp, 0, sizeof(vstp));
1546 vstp.valid = true;
1547 vstp.sid = vlan.sid;
1548
1549 err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
1550 if (err)
1551 return err;
1552 }
1553
1554 /* Non-bridged ports and bridge groups use FIDs from 1 to
1555 * num_ports; VLANs use FIDs from num_ports+1 to 4095.
1556 */
1557 vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
1558 ps->num_ports + 1);
1559 if (unlikely(vlan.fid == VLAN_N_VID)) {
1560 pr_err("no more FID available for VLAN %d\n", vid);
1561 return -ENOSPC;
1562 }
1563
1564 err = _mv88e6xxx_flush_fid(ds, vlan.fid);
1565 if (err)
1566 return err;
1567
1568 set_bit(vlan.fid, ps->fid_bitmap);
1569 }
1570
1571 *entry = vlan;
1572 return 0;
1573}
1574
1575int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
1576 bool untagged)
1577{
1578 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1579 struct mv88e6xxx_vtu_stu_entry vlan;
1580 int err;
1581
1582 mutex_lock(&ps->smi_mutex);
1583 err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
1584 if (err)
1585 goto unlock;
1586
1587 if (vlan.vid != vid || !vlan.valid) {
1588 err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
1589 if (err)
1590 goto unlock;
1591 }
1592
1593 vlan.data[port] = untagged ?
1594 GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
1595 GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;
1596
1597 err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
1598unlock:
1599 mutex_unlock(&ps->smi_mutex);
1600
1601 return err;
1602}
1603
Vivien Didelot7dad08d2015-08-13 12:52:21 -04001604int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
1605{
1606 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1607 struct mv88e6xxx_vtu_stu_entry vlan;
1608 bool keep = false;
1609 int i, err;
1610
1611 mutex_lock(&ps->smi_mutex);
1612
1613 err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
1614 if (err)
1615 goto unlock;
1616
1617 if (vlan.vid != vid || !vlan.valid ||
1618 vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
1619 err = -ENOENT;
1620 goto unlock;
1621 }
1622
1623 vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
1624
1625 /* keep the VLAN unless all ports are excluded */
1626 for (i = 0; i < ps->num_ports; ++i) {
1627 if (dsa_is_cpu_port(ds, i))
1628 continue;
1629
1630 if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
1631 keep = true;
1632 break;
1633 }
1634 }
1635
1636 vlan.valid = keep;
1637 err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
1638 if (err)
1639 goto unlock;
1640
1641 if (!keep)
1642 clear_bit(vlan.fid, ps->fid_bitmap);
1643
1644unlock:
1645 mutex_unlock(&ps->smi_mutex);
1646
1647 return err;
1648}
1649
Vivien Didelot02512b62015-08-13 12:52:20 -04001650static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
1651 struct mv88e6xxx_vtu_stu_entry *entry)
1652{
1653 int err;
1654
1655 do {
1656 if (vid == 4095)
1657 return -ENOENT;
1658
1659 err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
1660 if (err)
1661 return err;
1662
1663 if (!entry->valid)
1664 return -ENOENT;
1665
1666 vid = entry->vid;
1667 } while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
1668 entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);
1669
1670 return 0;
1671}
1672
Vivien Didelotb8fee952015-08-13 12:52:19 -04001673int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
1674 unsigned long *ports, unsigned long *untagged)
1675{
1676 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1677 struct mv88e6xxx_vtu_stu_entry next;
1678 int port;
1679 int err;
1680
1681 if (*vid == 4095)
1682 return -ENOENT;
1683
1684 mutex_lock(&ps->smi_mutex);
1685 err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
1686 mutex_unlock(&ps->smi_mutex);
1687
1688 if (err)
1689 return err;
1690
1691 if (!next.valid)
1692 return -ENOENT;
1693
1694 *vid = next.vid;
1695
1696 for (port = 0; port < ps->num_ports; ++port) {
1697 clear_bit(port, ports);
1698 clear_bit(port, untagged);
1699
1700 if (dsa_is_cpu_port(ds, port))
1701 continue;
1702
1703 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
1704 next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1705 set_bit(port, ports);
1706
1707 if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
1708 set_bit(port, untagged);
1709 }
1710
1711 return 0;
1712}
1713
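/* The ATU MAC registers hold the address two octets per 16-bit register, most
 * significant octet in the upper byte.  For example, 00:11:22:33:44:55 is
 * written as 0x0011 to GLOBAL_ATU_MAC_01, and 0x2233 and 0x4455 to the two
 * registers that follow it.
 */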
Vivien Didelotc5723ac2015-08-10 09:09:48 -04001714static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
1715 const unsigned char *addr)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001716{
1717 int i, ret;
1718
1719 for (i = 0; i < 3; i++) {
Andrew Lunncca8b132015-04-02 04:06:39 +02001720 ret = _mv88e6xxx_reg_write(
1721 ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
1722 (addr[i * 2] << 8) | addr[i * 2 + 1]);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001723 if (ret < 0)
1724 return ret;
1725 }
1726
1727 return 0;
1728}
1729
Vivien Didelotc5723ac2015-08-10 09:09:48 -04001730static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001731{
1732 int i, ret;
1733
1734 for (i = 0; i < 3; i++) {
Andrew Lunncca8b132015-04-02 04:06:39 +02001735 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1736 GLOBAL_ATU_MAC_01 + i);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001737 if (ret < 0)
1738 return ret;
1739 addr[i * 2] = ret >> 8;
1740 addr[i * 2 + 1] = ret & 0xff;
1741 }
1742
1743 return 0;
1744}
1745
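/* Load an ATU entry: write its MAC into the ATU MAC registers, pack the trunk
 * flag, port vector (or trunk ID) and entry state into GLOBAL_ATU_DATA, then
 * issue a Load/Purge operation on the entry's FID.  Loading an entry with
 * state GLOBAL_ATU_DATA_STATE_UNUSED purges it from the database.
 */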
Vivien Didelotfd231c82015-08-10 09:09:50 -04001746static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
1747 struct mv88e6xxx_atu_entry *entry)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001748{
Vivien Didelotfd231c82015-08-10 09:09:50 -04001749 u16 reg = 0;
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001750 int ret;
1751
1752 ret = _mv88e6xxx_atu_wait(ds);
1753 if (ret < 0)
1754 return ret;
1755
Vivien Didelotfd231c82015-08-10 09:09:50 -04001756 ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001757 if (ret < 0)
1758 return ret;
1759
Vivien Didelotfd231c82015-08-10 09:09:50 -04001760 if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1761 unsigned int mask, shift;
1762
1763 if (entry->trunk) {
1764 reg |= GLOBAL_ATU_DATA_TRUNK;
1765 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1766 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1767 } else {
1768 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1769 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1770 }
1771
1772 reg |= (entry->portv_trunkid << shift) & mask;
1773 }
1774
1775 reg |= entry->state & GLOBAL_ATU_DATA_STATE_MASK;
1776
1777 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, reg);
1778 if (ret < 0)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001779 return ret;
1780
Vivien Didelotfd231c82015-08-10 09:09:50 -04001781 return _mv88e6xxx_atu_cmd(ds, entry->fid, GLOBAL_ATU_OP_LOAD_DB);
1782}
David S. Millercdf09692015-08-11 12:00:37 -07001783
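/* Return the FID to use for an FDB operation on this port: VID 0 selects the
 * port's own default FID, otherwise the FID of the VTU entry matching vid is
 * returned, or -ENOENT if the port is not a member of such a VLAN.
 */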
Vivien Didelotfd231c82015-08-10 09:09:50 -04001784static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
1785{
1786 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot02512b62015-08-13 12:52:20 -04001787 struct mv88e6xxx_vtu_stu_entry vlan;
1788 int err;
Vivien Didelotfd231c82015-08-10 09:09:50 -04001789
1790 if (vid == 0)
1791 return ps->fid[port];
1792
Vivien Didelot02512b62015-08-13 12:52:20 -04001793 err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
1794 if (err)
1795 return err;
1796
1797 if (vlan.vid == vid)
1798 return vlan.fid;
1799
Vivien Didelotfd231c82015-08-10 09:09:50 -04001800 return -ENOENT;
1801}
1802
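/* Build a single-port ATU entry for addr in the database selected by vid and
 * load it.  Passing GLOBAL_ATU_DATA_STATE_UNUSED as the state purges the
 * entry instead, which is how mv88e6xxx_port_fdb_del() removes an address.
 */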
1803static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
1804 const unsigned char *addr, u16 vid,
1805 u8 state)
1806{
1807 struct mv88e6xxx_atu_entry entry = { 0 };
1808 int ret;
1809
1810 ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
1811 if (ret < 0)
1812 return ret;
1813
1814 entry.fid = ret;
1815 entry.state = state;
1816 ether_addr_copy(entry.mac, addr);
1817 if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1818 entry.trunk = false;
1819 entry.portv_trunkid = BIT(port);
1820 }
1821
1822 return _mv88e6xxx_atu_load(ds, &entry);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001823}
1824
David S. Millercdf09692015-08-11 12:00:37 -07001825int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
1826 const unsigned char *addr, u16 vid)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001827{
David S. Millercdf09692015-08-11 12:00:37 -07001828 int state = is_multicast_ether_addr(addr) ?
1829 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1830 GLOBAL_ATU_DATA_STATE_UC_STATIC;
1831 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot6630e232015-08-06 01:44:07 -04001832 int ret;
1833
David S. Millercdf09692015-08-11 12:00:37 -07001834 mutex_lock(&ps->smi_mutex);
Vivien Didelotfd231c82015-08-10 09:09:50 -04001835 ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid, state);
David S. Millercdf09692015-08-11 12:00:37 -07001836 mutex_unlock(&ps->smi_mutex);
1837
1838 return ret;
1839}
1840
1841int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
1842 const unsigned char *addr, u16 vid)
1843{
1844 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1845 int ret;
1846
1847 mutex_lock(&ps->smi_mutex);
Vivien Didelotfd231c82015-08-10 09:09:50 -04001848 ret = _mv88e6xxx_port_fdb_load(ds, port, addr, vid,
David S. Millercdf09692015-08-11 12:00:37 -07001849 GLOBAL_ATU_DATA_STATE_UNUSED);
1850 mutex_unlock(&ps->smi_mutex);
1851
1852 return ret;
1853}
1854
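/* Read back the ATU entry following addr in database fid: seed the ATU MAC
 * registers, issue a GetNext operation, then read the resulting MAC and
 * unpack the trunk flag, port vector (or trunk ID) and state from
 * GLOBAL_ATU_DATA.
 */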
Vivien Didelot1d194042015-08-10 09:09:51 -04001855static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
1856 const unsigned char *addr,
1857 struct mv88e6xxx_atu_entry *entry)
David S. Millercdf09692015-08-11 12:00:37 -07001858{
Vivien Didelot1d194042015-08-10 09:09:51 -04001859 struct mv88e6xxx_atu_entry next = { 0 };
1860 int ret;
1861
1862 next.fid = fid;
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001863
1864 ret = _mv88e6xxx_atu_wait(ds);
1865 if (ret < 0)
1866 return ret;
1867
Vivien Didelotc5723ac2015-08-10 09:09:48 -04001868 ret = _mv88e6xxx_atu_mac_write(ds, addr);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001869 if (ret < 0)
1870 return ret;
1871
Vivien Didelot1d194042015-08-10 09:09:51 -04001872 ret = _mv88e6xxx_atu_cmd(ds, fid, GLOBAL_ATU_OP_GET_NEXT_DB);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001873 if (ret < 0)
1874 return ret;
1875
Vivien Didelot1d194042015-08-10 09:09:51 -04001876 ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
1877 if (ret < 0)
1878 return ret;
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001879
Vivien Didelot1d194042015-08-10 09:09:51 -04001880 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
1881 if (ret < 0)
1882 return ret;
1883
1884 next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
1885 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1886 unsigned int mask, shift;
1887
1888 if (ret & GLOBAL_ATU_DATA_TRUNK) {
1889 next.trunk = true;
1890 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1891 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1892 } else {
1893 next.trunk = false;
1894 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1895 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1896 }
1897
1898 next.portv_trunkid = (ret & mask) >> shift;
1899 }
1900
1901 *entry = next;
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001902 return 0;
1903}
1904
David S. Millercdf09692015-08-11 12:00:37 -07001905/* Get the next FDB entry reachable from this port, walking its address databases. */
1906int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
Vivien Didelot2a778e12015-08-10 09:09:49 -04001907 unsigned char *addr, u16 *vid, bool *is_static)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001908{
1909 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot1d194042015-08-10 09:09:51 -04001910 struct mv88e6xxx_atu_entry next;
1911 u16 fid;
Vivien Didelot87820512015-08-06 01:44:08 -04001912 int ret;
1913
1914 mutex_lock(&ps->smi_mutex);
Vivien Didelot1d194042015-08-10 09:09:51 -04001915
1916 ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
1917 if (ret < 0)
1918 goto unlock;
1919 fid = ret;
1920
1921 do {
1922 if (is_broadcast_ether_addr(addr)) {
Vivien Didelot02512b62015-08-13 12:52:20 -04001923 struct mv88e6xxx_vtu_stu_entry vtu;
1924
1925 ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
1926 if (ret < 0)
1927 goto unlock;
1928
1929 *vid = vtu.vid;
1930 fid = vtu.fid;
Vivien Didelot1d194042015-08-10 09:09:51 -04001931 }
1932
1933 ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
1934 if (ret < 0)
1935 goto unlock;
1936
1937 ether_addr_copy(addr, next.mac);
1938
1939 if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
1940 continue;
1941 } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
1942
1943 *is_static = next.state == (is_multicast_ether_addr(addr) ?
1944 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1945 GLOBAL_ATU_DATA_STATE_UC_STATIC);
1946unlock:
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001947 mutex_unlock(&ps->smi_mutex);
1948
1949 return ret;
1950}
1951
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001952static void mv88e6xxx_bridge_work(struct work_struct *work)
1953{
1954 struct mv88e6xxx_priv_state *ps;
1955 struct dsa_switch *ds;
1956 int port;
1957
1958 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
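	/* The driver private state is allocated directly after struct
	 * dsa_switch (see ds_to_priv()), so stepping back one struct
	 * dsa_switch recovers the switch pointer from ps.
	 */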
1959 ds = ((struct dsa_switch *)ps) - 1;
1960
1961 while (ps->port_state_update_mask) {
1962 port = __ffs(ps->port_state_update_mask);
1963 clear_bit(port, &ps->port_state_update_mask);
1964 mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
1965 }
1966}
1967
Andrew Lunndbde9e62015-05-06 01:09:48 +02001968static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
Guenter Roeckd827e882015-03-26 18:36:29 -07001969{
1970 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001971 int ret, fid;
Andrew Lunn54d792f2015-05-06 01:09:47 +02001972 u16 reg;
Guenter Roeckd827e882015-03-26 18:36:29 -07001973
1974 mutex_lock(&ps->smi_mutex);
1975
Andrew Lunn54d792f2015-05-06 01:09:47 +02001976 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
1977 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
1978 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07001979 mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02001980 /* MAC Forcing register: don't force link, speed,
1981 * duplex or flow control state to any particular
1982 * values on physical ports, but force the CPU port
1983 * and all DSA ports to their maximum bandwidth and
1984 * full duplex.
1985 */
1986 ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
 if (ret < 0)
 goto abort;
 reg = ret;
Andrew Lunn60045cb2015-08-17 23:52:51 +02001987 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02001988 reg |= PORT_PCS_CTRL_FORCE_LINK |
1989 PORT_PCS_CTRL_LINK_UP |
1990 PORT_PCS_CTRL_DUPLEX_FULL |
1991 PORT_PCS_CTRL_FORCE_DUPLEX;
1992 if (mv88e6xxx_6065_family(ds))
1993 reg |= PORT_PCS_CTRL_100;
1994 else
1995 reg |= PORT_PCS_CTRL_1000;
1996 } else {
1997 reg |= PORT_PCS_CTRL_UNFORCED;
1998 }
1999
2000 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2001 PORT_PCS_CTRL, reg);
2002 if (ret)
2003 goto abort;
2004 }
2005
2006 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2007 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2008 * tunneling, determine priority by looking at 802.1p and IP
2009 * priority fields (IP prio has precedence), and set STP state
2010 * to Forwarding.
2011 *
2012 * If this is the CPU link, use DSA or EDSA tagging depending
2013 * on which tagging mode was configured.
2014 *
2015 * If this is a link to another switch, use DSA tagging mode.
2016 *
2017 * If this is the upstream port for this switch, enable
2018 * forwarding of unknown unicasts and multicasts.
2019 */
2020 reg = 0;
2021 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2022 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2023 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002024 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
Andrew Lunn54d792f2015-05-06 01:09:47 +02002025 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
2026 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
2027 PORT_CONTROL_STATE_FORWARDING;
2028 if (dsa_is_cpu_port(ds, port)) {
2029 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2030 reg |= PORT_CONTROL_DSA_TAG;
2031 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002032 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2033 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002034 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2035 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2036 else
2037 reg |= PORT_CONTROL_FRAME_MODE_DSA;
2038 }
2039
2040 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2041 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2042 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002043 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002044 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2045 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
2046 }
2047 }
Andrew Lunn6083ce72015-08-17 23:52:52 +02002048 if (dsa_is_dsa_port(ds, port)) {
2049 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2050 reg |= PORT_CONTROL_DSA_TAG;
2051 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2052 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2053 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002054 reg |= PORT_CONTROL_FRAME_MODE_DSA;
Andrew Lunn6083ce72015-08-17 23:52:52 +02002055 }
2056
Andrew Lunn54d792f2015-05-06 01:09:47 +02002057 if (port == dsa_upstream_port(ds))
2058 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2059 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2060 }
2061 if (reg) {
2062 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2063 PORT_CONTROL, reg);
2064 if (ret)
2065 goto abort;
2066 }
2067
Vivien Didelot8efdda42015-08-13 12:52:23 -04002068 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2069 * 10240 bytes, set the 802.1Q mode to Fallback, don't discard tagged or
2070 * untagged frames on this port, do a destination address lookup on all
2071 * received packets as usual, disable ARP mirroring and don't send a
2072 * copy of all transmitted/received frames on this port to the CPU.
Andrew Lunn54d792f2015-05-06 01:09:47 +02002073 */
2074 reg = 0;
2075 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2076 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002077 mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
Andrew Lunn54d792f2015-05-06 01:09:47 +02002078 reg = PORT_CONTROL_2_MAP_DA;
2079
2080 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002081 mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
Andrew Lunn54d792f2015-05-06 01:09:47 +02002082 reg |= PORT_CONTROL_2_JUMBO_10240;
2083
2084 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
2085 /* Set the upstream port this port should use */
2086 reg |= dsa_upstream_port(ds);
2087 /* enable forwarding of unknown multicast addresses to
2088 * the upstream port
2089 */
2090 if (port == dsa_upstream_port(ds))
2091 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
2092 }
2093
Vivien Didelotf5117ce2015-08-19 18:54:55 -04002094 reg |= PORT_CONTROL_2_8021Q_FALLBACK;
Vivien Didelot8efdda42015-08-13 12:52:23 -04002095
Andrew Lunn54d792f2015-05-06 01:09:47 +02002096 if (reg) {
2097 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2098 PORT_CONTROL_2, reg);
2099 if (ret)
2100 goto abort;
2101 }
2102
2103 /* Port Association Vector: when learning source addresses
2104 * of packets, add the address to the address database using
2105 * a port bitmap that has only the bit for this port set and
2106 * the other bits clear.
2107 */
2108 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
2109 1 << port);
2110 if (ret)
2111 goto abort;
2112
2113 /* Egress rate control 2: disable egress rate control. */
2114 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
2115 0x0000);
2116 if (ret)
2117 goto abort;
2118
2119 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002120 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2121 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002122 /* Do not limit the period of time that this port can
2123 * be paused for by the remote end or the period of
2124 * time that this port can pause the remote end.
2125 */
2126 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2127 PORT_PAUSE_CTRL, 0x0000);
2128 if (ret)
2129 goto abort;
2130
2131 /* Port ATU control: disable limiting the number of
2132 * address database entries that this port is allowed
2133 * to use.
2134 */
2135 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2136 PORT_ATU_CONTROL, 0x0000);
 if (ret)
 goto abort;

2137 /* Priority Override: disable DA, SA and VTU priority
2138 * override.
2139 */
2140 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2141 PORT_PRI_OVERRIDE, 0x0000);
2142 if (ret)
2143 goto abort;
2144
2145 /* Port Ethertype: use the Ethertype DSA Ethertype
2146 * value.
2147 */
2148 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2149 PORT_ETH_TYPE, ETH_P_EDSA);
2150 if (ret)
2151 goto abort;
2152 /* Tag Remap: use an identity 802.1p prio -> switch
2153 * prio mapping.
2154 */
2155 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2156 PORT_TAG_REGMAP_0123, 0x3210);
2157 if (ret)
2158 goto abort;
2159
2160 /* Tag Remap 2: use an identity 802.1p prio -> switch
2161 * prio mapping.
2162 */
2163 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2164 PORT_TAG_REGMAP_4567, 0x7654);
2165 if (ret)
2166 goto abort;
2167 }
2168
2169 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2170 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002171 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2172 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002173 /* Rate Control: disable ingress rate limiting. */
2174 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2175 PORT_RATE_CONTROL, 0x0001);
2176 if (ret)
2177 goto abort;
2178 }
2179
Guenter Roeck366f0a02015-03-26 18:36:30 -07002180 /* Port Control 1: disable trunking, disable sending
2181 * learning messages to this port.
Guenter Roeckd827e882015-03-26 18:36:29 -07002182 */
Vivien Didelot614f03f2015-04-20 17:19:23 -04002183 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
Guenter Roeckd827e882015-03-26 18:36:29 -07002184 if (ret)
2185 goto abort;
2186
2187 /* Port based VLAN map: give each port its own address
2188 * database, allow the CPU port to talk to each of the 'real'
2189 * ports, and allow each of the 'real' ports to only talk to
2190 * the upstream port.
2191 */
Vivien Didelot194fea72015-08-10 09:09:47 -04002192 fid = port + 1;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07002193 ps->fid[port] = fid;
Vivien Didelot194fea72015-08-10 09:09:47 -04002194 set_bit(fid, ps->fid_bitmap);
Guenter Roeckd827e882015-03-26 18:36:29 -07002195
Guenter Roeckfacd95b2015-03-26 18:36:35 -07002196 if (!dsa_is_cpu_port(ds, port))
2197 ps->bridge_mask[fid] = 1 << port;
2198
2199 ret = _mv88e6xxx_update_port_config(ds, port);
Guenter Roeckd827e882015-03-26 18:36:29 -07002200 if (ret)
2201 goto abort;
2202
2203 /* Default VLAN ID and priority: don't set a default VLAN
2204 * ID, and set the default packet priority to zero.
2205 */
Vivien Didelot47cf1e652015-04-20 17:43:26 -04002206 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
2207 0x0000);
Guenter Roeckd827e882015-03-26 18:36:29 -07002208abort:
2209 mutex_unlock(&ps->smi_mutex);
2210 return ret;
2211}
2212
Andrew Lunndbde9e62015-05-06 01:09:48 +02002213int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2214{
2215 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2216 int ret;
2217 int i;
2218
2219 for (i = 0; i < ps->num_ports; i++) {
2220 ret = mv88e6xxx_setup_port(ds, i);
2221 if (ret < 0)
2222 return ret;
2223 }
2224 return 0;
2225}
2226
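/* Dump of all global, global2 and per-port registers, exposed through debugfs
 * as the "regs" file in the "dsa%d" directory created in
 * mv88e6xxx_setup_common() below (typically /sys/kernel/debug/dsa0/regs on a
 * single-switch system, assuming debugfs is mounted in the usual place).
 */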
Andrew Lunn87c8cef2015-06-20 18:42:28 +02002227static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
2228{
2229 struct dsa_switch *ds = s->private;
2230
2231 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2232 int reg, port;
2233
2234 seq_puts(s, " GLOBAL GLOBAL2 ");
2235 for (port = 0 ; port < ps->num_ports; port++)
2236 seq_printf(s, " %2d ", port);
2237 seq_puts(s, "\n");
2238
2239 for (reg = 0; reg < 32; reg++) {
2240 seq_printf(s, "%2x: ", reg);
2241 seq_printf(s, " %4x %4x ",
2242 mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
2243 mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));
2244
2245 for (port = 0 ; port < ps->num_ports; port++)
2246 seq_printf(s, "%4x ",
2247 mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
2248 seq_puts(s, "\n");
2249 }
2250
2251 return 0;
2252}
2253
2254static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
2255{
2256 return single_open(file, mv88e6xxx_regs_show, inode->i_private);
2257}
2258
2259static const struct file_operations mv88e6xxx_regs_fops = {
2260 .open = mv88e6xxx_regs_open,
2261 .read = seq_read,
2262 .llseek = no_llseek,
2263 .release = single_release,
2264 .owner = THIS_MODULE,
2265};
2266
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002267static void mv88e6xxx_atu_show_header(struct seq_file *s)
2268{
2269 seq_puts(s, "DB T/P Vec State Addr\n");
2270}
2271
2272static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
2273 unsigned char *addr, int data)
2274{
2275 bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
2276 int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
2277 GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
2278 int state = data & GLOBAL_ATU_DATA_STATE_MASK;
2279
2280 seq_printf(s, "%03x %5s %10pb %x %pM\n",
2281 dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
2282}
2283
2284static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
2285 int dbnum)
2286{
2287 unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2288 unsigned char addr[6];
2289 int ret, data, state;
2290
Vivien Didelotc5723ac2015-08-10 09:09:48 -04002291 ret = _mv88e6xxx_atu_mac_write(ds, bcast);
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002292 if (ret < 0)
2293 return ret;
2294
2295 do {
2296 ret = _mv88e6xxx_atu_cmd(ds, dbnum, GLOBAL_ATU_OP_GET_NEXT_DB);
2297 if (ret < 0)
2298 return ret;
2299 data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
2300 if (data < 0)
2301 return data;
2302
2303 state = data & GLOBAL_ATU_DATA_STATE_MASK;
2304 if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
2305 break;
Vivien Didelotc5723ac2015-08-10 09:09:48 -04002306 ret = _mv88e6xxx_atu_mac_read(ds, addr);
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002307 if (ret < 0)
2308 return ret;
2309 mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
2310 } while (state != GLOBAL_ATU_DATA_STATE_UNUSED);
2311
2312 return 0;
2313}
2314
2315static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
2316{
2317 struct dsa_switch *ds = s->private;
2318 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2319 int dbnum;
2320
2321 mv88e6xxx_atu_show_header(s);
2322
2323 for (dbnum = 0; dbnum < 255; dbnum++) {
2324 mutex_lock(&ps->smi_mutex);
2325 mv88e6xxx_atu_show_db(s, ds, dbnum);
2326 mutex_unlock(&ps->smi_mutex);
2327 }
2328
2329 return 0;
2330}
2331
2332static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
2333{
2334 return single_open(file, mv88e6xxx_atu_show, inode->i_private);
2335}
2336
2337static const struct file_operations mv88e6xxx_atu_fops = {
2338 .open = mv88e6xxx_atu_open,
2339 .read = seq_read,
2340 .llseek = no_llseek,
2341 .release = single_release,
2342 .owner = THIS_MODULE,
2343};
2344
Andrew Lunn532c7a32015-06-20 18:42:31 +02002345static void mv88e6xxx_stats_show_header(struct seq_file *s,
2346 struct mv88e6xxx_priv_state *ps)
2347{
2348 int port;
2349
2350 seq_puts(s, " Statistic ");
2351 for (port = 0 ; port < ps->num_ports; port++)
2352 seq_printf(s, "Port %2d ", port);
2353 seq_puts(s, "\n");
2354}
2355
2356static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
2357{
2358 struct dsa_switch *ds = s->private;
2359 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2360 struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
2361 int port, stat, max_stats;
2362 uint64_t value;
2363
2364 if (have_sw_in_discards(ds))
2365 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
2366 else
2367 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
2368
2369 mv88e6xxx_stats_show_header(s, ps);
2370
2371 mutex_lock(&ps->smi_mutex);
2372
2373 for (stat = 0; stat < max_stats; stat++) {
2374 seq_printf(s, "%19s: ", stats[stat].string);
2375 for (port = 0 ; port < ps->num_ports; port++) {
2376 _mv88e6xxx_stats_snapshot(ds, port);
2377 value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
2378 port);
2379 seq_printf(s, "%8llu ", value);
2380 }
2381 seq_puts(s, "\n");
2382 }
2383 mutex_unlock(&ps->smi_mutex);
2384
2385 return 0;
2386}
2387
2388static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
2389{
2390 return single_open(file, mv88e6xxx_stats_show, inode->i_private);
2391}
2392
2393static const struct file_operations mv88e6xxx_stats_fops = {
2394 .open = mv88e6xxx_stats_open,
2395 .read = seq_read,
2396 .llseek = no_llseek,
2397 .release = single_release,
2398 .owner = THIS_MODULE,
2399};
2400
Andrew Lunnd35bd872015-06-20 18:42:32 +02002401static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
2402{
2403 struct dsa_switch *ds = s->private;
2404 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2405 int target, ret;
2406
2407 seq_puts(s, "Target Port\n");
2408
2409 mutex_lock(&ps->smi_mutex);
2410 for (target = 0; target < 32; target++) {
2411 ret = _mv88e6xxx_reg_write(
2412 ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2413 target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
2414 if (ret < 0)
2415 goto out;
2416 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
2417 GLOBAL2_DEVICE_MAPPING);
2418 seq_printf(s, " %2d %2d\n", target,
2419 ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
2420 }
2421out:
2422 mutex_unlock(&ps->smi_mutex);
2423
2424 return 0;
2425}
2426
2427static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
2428{
2429 return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
2430}
2431
2432static const struct file_operations mv88e6xxx_device_map_fops = {
2433 .open = mv88e6xxx_device_map_open,
2434 .read = seq_read,
2435 .llseek = no_llseek,
2436 .release = single_release,
2437 .owner = THIS_MODULE,
2438};
2439
Andrew Lunn56d95e22015-06-20 18:42:33 +02002440static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
2441{
2442 struct dsa_switch *ds = s->private;
2443 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2444 int reg, ret;
2445
2446 seq_puts(s, "Register Value\n");
2447
2448 mutex_lock(&ps->smi_mutex);
2449 for (reg = 0; reg < 0x80; reg++) {
2450 ret = _mv88e6xxx_reg_write(
2451 ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
2452 reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
2453 if (ret < 0)
2454 goto out;
2455
2456 ret = _mv88e6xxx_scratch_wait(ds);
2457 if (ret < 0)
2458 goto out;
2459
2460 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
2461 GLOBAL2_SCRATCH_MISC);
2462 seq_printf(s, " %2x %2x\n", reg,
2463 ret & GLOBAL2_SCRATCH_VALUE_MASK);
2464 }
2465out:
2466 mutex_unlock(&ps->smi_mutex);
2467
2468 return 0;
2469}
2470
2471static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
2472{
2473 return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
2474}
2475
2476static const struct file_operations mv88e6xxx_scratch_fops = {
2477 .open = mv88e6xxx_scratch_open,
2478 .read = seq_read,
2479 .llseek = no_llseek,
2480 .release = single_release,
2481 .owner = THIS_MODULE,
2482};
2483
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002484int mv88e6xxx_setup_common(struct dsa_switch *ds)
2485{
2486 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Andrew Lunn87c8cef2015-06-20 18:42:28 +02002487 char *name;
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002488
2489 mutex_init(&ps->smi_mutex);
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002490
Andrew Lunncca8b132015-04-02 04:06:39 +02002491 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
Andrew Lunna8f064c2015-03-26 18:36:40 -07002492
Guenter Roeckfacd95b2015-03-26 18:36:35 -07002493 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
2494
Andrew Lunn87c8cef2015-06-20 18:42:28 +02002495 name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
2496 ps->dbgfs = debugfs_create_dir(name, NULL);
2497 kfree(name);
2498
2499 debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
2500 &mv88e6xxx_regs_fops);
2501
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002502 debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
2503 &mv88e6xxx_atu_fops);
2504
Andrew Lunn532c7a32015-06-20 18:42:31 +02002505 debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
2506 &mv88e6xxx_stats_fops);
2507
Andrew Lunnd35bd872015-06-20 18:42:32 +02002508 debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
2509 &mv88e6xxx_device_map_fops);
Andrew Lunn56d95e22015-06-20 18:42:33 +02002510
2511 debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
2512 &mv88e6xxx_scratch_fops);
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002513 return 0;
2514}
2515
Andrew Lunn54d792f2015-05-06 01:09:47 +02002516int mv88e6xxx_setup_global(struct dsa_switch *ds)
2517{
2518 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot24751e22015-08-03 09:17:44 -04002519 int ret;
Andrew Lunn54d792f2015-05-06 01:09:47 +02002520 int i;
2521
2522 /* Set the default address aging time to 5 minutes, and
2523 * enable address learn messages to be sent to all message
2524 * ports.
2525 */
2526 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
2527 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
2528
2529 /* Configure the IP ToS mapping registers. */
2530 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2531 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2532 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2533 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2534 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2535 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2536 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2537 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2538
2539 /* Configure the IEEE 802.1p priority mapping register. */
2540 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
2541
2542 /* Send all frames with destination addresses matching
2543 * 01:80:c2:00:00:0x to the CPU port.
2544 */
2545 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2546
2547 /* Ignore removed tag data on doubly tagged packets, disable
2548 * flow control messages, force flow control priority to the
2549 * highest, and send all special multicast frames to the CPU
2550 * port at the highest priority.
2551 */
2552 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
2553 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
2554 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
2555
2556 /* Program the DSA routing table. */
2557 for (i = 0; i < 32; i++) {
2558 int nexthop = 0x1f;
2559
2560 if (ds->pd->rtable &&
2561 i != ds->index && i < ds->dst->pd->nr_chips)
2562 nexthop = ds->pd->rtable[i] & 0x1f;
2563
2564 REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2565 GLOBAL2_DEVICE_MAPPING_UPDATE |
2566 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
2567 nexthop);
2568 }
2569
2570 /* Clear all trunk masks. */
2571 for (i = 0; i < 8; i++)
2572 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
2573 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
2574 ((1 << ps->num_ports) - 1));
2575
2576 /* Clear all trunk mappings. */
2577 for (i = 0; i < 16; i++)
2578 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
2579 GLOBAL2_TRUNK_MAPPING_UPDATE |
2580 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2581
2582 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002583 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2584 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002585 /* Send all frames with destination addresses matching
2586 * 01:80:c2:00:00:2x to the CPU port.
2587 */
2588 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
2589
2590 /* Initialise cross-chip port VLAN table to reset
2591 * defaults.
2592 */
2593 REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
2594
2595 /* Clear the priority override table. */
2596 for (i = 0; i < 16; i++)
2597 REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
2598 0x8000 | (i << 8));
2599 }
2600
2601 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2602 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002603 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2604 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002605 /* Disable ingress rate limiting by resetting all
2606 * ingress rate limit registers to their initial
2607 * state.
2608 */
2609 for (i = 0; i < ps->num_ports; i++)
2610 REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
2611 0x9000 | (i << 8));
2612 }
2613
Andrew Lunndb687a52015-06-20 21:31:29 +02002614 /* Clear the statistics counters for all ports */
2615 REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
2616
2617 /* Wait for the flush to complete. */
Vivien Didelot24751e22015-08-03 09:17:44 -04002618 mutex_lock(&ps->smi_mutex);
2619 ret = _mv88e6xxx_stats_wait(ds);
Vivien Didelot6b17e862015-08-13 12:52:18 -04002620 if (ret < 0)
2621 goto unlock;
2622
2623 /* Clear all the VTU and STU entries */
2624 ret = _mv88e6xxx_vtu_stu_flush(ds);
2625unlock:
Vivien Didelot24751e22015-08-03 09:17:44 -04002626 mutex_unlock(&ps->smi_mutex);
Andrew Lunndb687a52015-06-20 21:31:29 +02002627
Vivien Didelot24751e22015-08-03 09:17:44 -04002628 return ret;
Andrew Lunn54d792f2015-05-06 01:09:47 +02002629}
2630
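/* Reset sequence: put every port into the disabled state, give the transmit
 * queues a couple of milliseconds to drain, issue a software reset through
 * global register 0x04 (keeping the PPU running if requested so indirect PHY
 * access keeps working), then poll global register 0x00 for up to a second
 * until the reset-complete bits are set.
 */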
Andrew Lunn143a8302015-04-02 04:06:34 +02002631int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
2632{
2633 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2634 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2635 unsigned long timeout;
2636 int ret;
2637 int i;
2638
2639 /* Set all ports to the disabled state. */
2640 for (i = 0; i < ps->num_ports; i++) {
Andrew Lunncca8b132015-04-02 04:06:39 +02002641 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
2642 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
Andrew Lunn143a8302015-04-02 04:06:34 +02002643 }
2644
2645 /* Wait for transmit queues to drain. */
2646 usleep_range(2000, 4000);
2647
2648 /* Reset the switch. Keep the PPU active if requested. The PPU
2649 * needs to be active to support indirect phy register access
2650 * through global registers 0x18 and 0x19.
2651 */
2652 if (ppu_active)
2653 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
2654 else
2655 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
2656
2657 /* Wait up to one second for reset to complete. */
2658 timeout = jiffies + 1 * HZ;
2659 while (time_before(jiffies, timeout)) {
2660 ret = REG_READ(REG_GLOBAL, 0x00);
2661 if ((ret & is_reset) == is_reset)
2662 break;
2663 usleep_range(1000, 2000);
2664 }
2665 if (time_after(jiffies, timeout))
2666 return -ETIMEDOUT;
2667
2668 return 0;
2669}
2670
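/* Paged PHY access: register 0x16 is the page select register on the
 * internal PHYs, so these helpers switch to the requested page, do the
 * access, and always restore page 0 before dropping the SMI lock.
 */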
Andrew Lunn491435852015-04-02 04:06:35 +02002671int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2672{
2673 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2674 int ret;
2675
Andrew Lunn3898c142015-05-06 01:09:53 +02002676 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002677 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
Andrew Lunn491435852015-04-02 04:06:35 +02002678 if (ret < 0)
2679 goto error;
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002680 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
Andrew Lunn491435852015-04-02 04:06:35 +02002681error:
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002682 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
Andrew Lunn3898c142015-05-06 01:09:53 +02002683 mutex_unlock(&ps->smi_mutex);
Andrew Lunn491435852015-04-02 04:06:35 +02002684 return ret;
2685}
2686
2687int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2688 int reg, int val)
2689{
2690 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2691 int ret;
2692
Andrew Lunn3898c142015-05-06 01:09:53 +02002693 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002694 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
Andrew Lunn491435852015-04-02 04:06:35 +02002695 if (ret < 0)
2696 goto error;
2697
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002698 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
Andrew Lunn491435852015-04-02 04:06:35 +02002699error:
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002700 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
Andrew Lunn3898c142015-05-06 01:09:53 +02002701 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002702 return ret;
2703}
2704
2705static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
2706{
2707 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2708
2709 if (port >= 0 && port < ps->num_ports)
2710 return port;
2711 return -EINVAL;
2712}
2713
2714int
2715mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
2716{
2717 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2718 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2719 int ret;
2720
2721 if (addr < 0)
2722 return addr;
2723
Andrew Lunn3898c142015-05-06 01:09:53 +02002724 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002725 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
Andrew Lunn3898c142015-05-06 01:09:53 +02002726 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002727 return ret;
2728}
2729
2730int
2731mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
2732{
2733 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2734 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2735 int ret;
2736
2737 if (addr < 0)
2738 return addr;
2739
Andrew Lunn3898c142015-05-06 01:09:53 +02002740 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002741 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
Andrew Lunn3898c142015-05-06 01:09:53 +02002742 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002743 return ret;
2744}
2745
2746int
2747mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
2748{
2749 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2750 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2751 int ret;
2752
2753 if (addr < 0)
2754 return addr;
2755
Andrew Lunn3898c142015-05-06 01:09:53 +02002756 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002757 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
Andrew Lunn3898c142015-05-06 01:09:53 +02002758 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002759 return ret;
2760}
2761
2762int
2763mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
2764 u16 val)
2765{
2766 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2767 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2768 int ret;
2769
2770 if (addr < 0)
2771 return addr;
2772
Andrew Lunn3898c142015-05-06 01:09:53 +02002773 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002774 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
Andrew Lunn3898c142015-05-06 01:09:53 +02002775 mutex_unlock(&ps->smi_mutex);
Andrew Lunn491435852015-04-02 04:06:35 +02002776 return ret;
2777}
2778
Guenter Roeckc22995c2015-07-25 09:42:28 -07002779#ifdef CONFIG_NET_DSA_HWMON
2780
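/* mv88e61xx_get_temp() below reads the sensor through PHY 0: page 6,
 * register 0x1a, where bit 5 enables the sensor and the low five bits hold
 * the reading.  The conversion is (raw - 5) * 5, in degrees Celsius as the
 * DSA HWMON hooks expect; for example a raw value of 21 reports 80 degrees.
 * (The example is illustrative arithmetic only, derived from the code below.)
 */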
2781static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
2782{
2783 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2784 int ret;
2785 int val;
2786
2787 *temp = 0;
2788
2789 mutex_lock(&ps->smi_mutex);
2790
2791 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
2792 if (ret < 0)
2793 goto error;
2794
2795 /* Enable temperature sensor */
2796 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2797 if (ret < 0)
2798 goto error;
2799
2800 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
2801 if (ret < 0)
2802 goto error;
2803
2804 /* Wait for temperature to stabilize */
2805 usleep_range(10000, 12000);
2806
2807 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2808 if (val < 0) {
2809 ret = val;
2810 goto error;
2811 }
2812
2813 /* Disable temperature sensor */
2814 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret & ~(1 << 5));
2815 if (ret < 0)
2816 goto error;
2817
2818 *temp = ((val & 0x1f) - 5) * 5;
2819
2820error:
2821 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
2822 mutex_unlock(&ps->smi_mutex);
2823 return ret;
2824}
2825
2826static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
2827{
2828 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2829 int ret;
2830
2831 *temp = 0;
2832
2833 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
2834 if (ret < 0)
2835 return ret;
2836
2837 *temp = (ret & 0xff) - 25;
2838
2839 return 0;
2840}
2841
2842int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
2843{
2844 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
2845 return mv88e63xx_get_temp(ds, temp);
2846
2847 return mv88e61xx_get_temp(ds, temp);
2848}
2849
2850int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
2851{
2852 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2853 int ret;
2854
2855 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2856 return -EOPNOTSUPP;
2857
2858 *temp = 0;
2859
2860 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2861 if (ret < 0)
2862 return ret;
2863
2864 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
2865
2866 return 0;
2867}
2868
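/* The alarm limit lives in bits 12:8 of page 6, register 26, in 5-degree
 * steps offset by 25 degrees: writing a limit of 75 stores
 * DIV_ROUND_CLOSEST(75, 5) + 5 = 20, and reading it back gives
 * 20 * 5 - 25 = 75, matching mv88e6xxx_get_temp_limit() above.
 */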
2869int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
2870{
2871 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2872 int ret;
2873
2874 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2875 return -EOPNOTSUPP;
2876
2877 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2878 if (ret < 0)
2879 return ret;
2880 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
2881 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
2882 (ret & 0xe0ff) | (temp << 8));
2883}
2884
2885int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
2886{
2887 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2888 int ret;
2889
2890 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2891 return -EOPNOTSUPP;
2892
2893 *alarm = false;
2894
2895 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2896 if (ret < 0)
2897 return ret;
2898
2899 *alarm = !!(ret & 0x40);
2900
2901 return 0;
2902}
2903#endif /* CONFIG_NET_DSA_HWMON */
2904
Ben Hutchings98e67302011-11-25 14:36:19 +00002905static int __init mv88e6xxx_init(void)
2906{
2907#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2908 register_switch_driver(&mv88e6131_switch_driver);
2909#endif
2910#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2911 register_switch_driver(&mv88e6123_61_65_switch_driver);
2912#endif
Guenter Roeck3ad50cc2014-10-29 10:44:56 -07002913#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2914 register_switch_driver(&mv88e6352_switch_driver);
2915#endif
Andrew Lunn42f27252014-09-12 23:58:44 +02002916#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2917 register_switch_driver(&mv88e6171_switch_driver);
2918#endif
Ben Hutchings98e67302011-11-25 14:36:19 +00002919 return 0;
2920}
2921module_init(mv88e6xxx_init);
2922
2923static void __exit mv88e6xxx_cleanup(void)
2924{
Andrew Lunn42f27252014-09-12 23:58:44 +02002925#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2926 unregister_switch_driver(&mv88e6171_switch_driver);
2927#endif
Vivien Didelot4212b542015-05-01 10:43:52 -04002928#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2929 unregister_switch_driver(&mv88e6352_switch_driver);
2930#endif
Ben Hutchings98e67302011-11-25 14:36:19 +00002931#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2932 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
2933#endif
2934#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2935 unregister_switch_driver(&mv88e6131_switch_driver);
2936#endif
2937}
2938module_exit(mv88e6xxx_cleanup);
Ben Hutchings3d825ed2011-11-25 14:37:16 +00002939
2940MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
2941MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
2942MODULE_LICENSE("GPL");