/*
 * net/dsa/mv88e6xxx.c - Marvell 88e6xxx switch chip support
 * Copyright (c) 2008 Marvell Semiconductor
 *
 * Copyright (c) 2015 CMC Electronics, Inc.
 *	Added support for VLAN Table Unit operations
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_bridge.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/seq_file.h>
#include <net/dsa.h>
#include <net/switchdev.h>
#include "mv88e6xxx.h"

/* MDIO bus access can be nested in the case of PHYs connected to the
 * internal MDIO bus of the switch, which is accessed via MDIO bus of
 * the Ethernet interface. Avoid lockdep false positives by using
 * mutex_lock_nested().
 */
static int mv88e6xxx_mdiobus_read(struct mii_bus *bus, int addr, u32 regnum)
{
	int ret;

	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
	ret = bus->read(bus, addr, regnum);
	mutex_unlock(&bus->mdio_lock);

	return ret;
}

static int mv88e6xxx_mdiobus_write(struct mii_bus *bus, int addr, u32 regnum,
				   u16 val)
{
	int ret;

	mutex_lock_nested(&bus->mdio_lock, SINGLE_DEPTH_NESTING);
	ret = bus->write(bus, addr, regnum, val);
	mutex_unlock(&bus->mdio_lock);

	return ret;
}

/* If the switch's ADDR[4:0] strap pins are strapped to zero, it will
 * use all 32 SMI bus addresses on its SMI bus, and all switch registers
 * will be directly accessible on some {device address,register address}
 * pair.  If the ADDR[4:0] pins are not strapped to zero, the switch
 * will only respond to SMI transactions to that specific address, and
 * an indirect addressing mechanism needs to be used to access its
 * registers.
 */
static int mv88e6xxx_reg_wait_ready(struct mii_bus *bus, int sw_addr)
{
	int ret;
	int i;

	for (i = 0; i < 16; i++) {
		ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_CMD);
		if (ret < 0)
			return ret;

		if ((ret & SMI_CMD_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

int __mv88e6xxx_reg_read(struct mii_bus *bus, int sw_addr, int addr, int reg)
{
	int ret;

	if (sw_addr == 0)
		return mv88e6xxx_mdiobus_read(bus, addr, reg);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the read command. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
				      SMI_CMD_OP_22_READ | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the read command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Read the data. */
	ret = mv88e6xxx_mdiobus_read(bus, sw_addr, SMI_DATA);
	if (ret < 0)
		return ret;

	return ret & 0xffff;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);
	int ret;

	if (bus == NULL)
		return -EINVAL;

	ret = __mv88e6xxx_reg_read(bus, ds->pd->sw_addr, addr, reg);
	if (ret < 0)
		return ret;

	dev_dbg(ds->master_dev, "<- addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, ret);

	return ret;
}

int mv88e6xxx_reg_read(struct dsa_switch *ds, int addr, int reg)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_read(ds, addr, reg);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int __mv88e6xxx_reg_write(struct mii_bus *bus, int sw_addr, int addr,
			  int reg, u16 val)
{
	int ret;

	if (sw_addr == 0)
		return mv88e6xxx_mdiobus_write(bus, addr, reg, val);

	/* Wait for the bus to become free. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	/* Transmit the data to write. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_DATA, val);
	if (ret < 0)
		return ret;

	/* Transmit the write command. */
	ret = mv88e6xxx_mdiobus_write(bus, sw_addr, SMI_CMD,
				      SMI_CMD_OP_22_WRITE | (addr << 5) | reg);
	if (ret < 0)
		return ret;

	/* Wait for the write command to complete. */
	ret = mv88e6xxx_reg_wait_ready(bus, sw_addr);
	if (ret < 0)
		return ret;

	return 0;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg,
				u16 val)
{
	struct mii_bus *bus = dsa_host_dev_to_mii_bus(ds->master_dev);

	if (bus == NULL)
		return -EINVAL;

	dev_dbg(ds->master_dev, "-> addr: 0x%.2x reg: 0x%.2x val: 0x%.4x\n",
		addr, reg, val);

	return __mv88e6xxx_reg_write(bus, ds->pd->sw_addr, addr, reg, val);
}

int mv88e6xxx_reg_write(struct dsa_switch *ds, int addr, int reg, u16 val)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_reg_write(ds, addr, reg, val);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_set_addr_direct(struct dsa_switch *ds, u8 *addr)
{
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 8) | addr[1]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);

	return 0;
}

int mv88e6xxx_set_addr_indirect(struct dsa_switch *ds, u8 *addr)
{
	int i;
	int ret;

	for (i = 0; i < 6; i++) {
		int j;

		/* Write the MAC address byte. */
		REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MAC,
			  GLOBAL2_SWITCH_MAC_BUSY | (i << 8) | addr[i]);

		/* Wait for the write to complete. */
		for (j = 0; j < 16; j++) {
			ret = REG_READ(REG_GLOBAL2, GLOBAL2_SWITCH_MAC);
			if ((ret & GLOBAL2_SWITCH_MAC_BUSY) == 0)
				break;
		}
		if (j == 16)
			return -ETIMEDOUT;
	}

	return 0;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read(struct dsa_switch *ds, int addr, int regnum)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_read(ds, addr, regnum);
	return 0xffff;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write(struct dsa_switch *ds, int addr, int regnum,
				u16 val)
{
	if (addr >= 0)
		return _mv88e6xxx_reg_write(ds, addr, regnum, val);
	return 0;
}

#ifdef CONFIG_NET_DSA_MV88E6XXX_NEED_PPU
static int mv88e6xxx_ppu_disable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL,
		  ret & ~GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) !=
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static int mv88e6xxx_ppu_enable(struct dsa_switch *ds)
{
	int ret;
	unsigned long timeout;

	ret = REG_READ(REG_GLOBAL, GLOBAL_CONTROL);
	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, ret | GLOBAL_CONTROL_PPU_ENABLE);

	timeout = jiffies + 1 * HZ;
	while (time_before(jiffies, timeout)) {
		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
		usleep_range(1000, 2000);
		if ((ret & GLOBAL_STATUS_PPU_MASK) ==
		    GLOBAL_STATUS_PPU_POLLING)
			return 0;
	}

	return -ETIMEDOUT;
}

static void mv88e6xxx_ppu_reenable_work(struct work_struct *ugly)
{
	struct mv88e6xxx_priv_state *ps;

	ps = container_of(ugly, struct mv88e6xxx_priv_state, ppu_work);
	if (mutex_trylock(&ps->ppu_mutex)) {
		struct dsa_switch *ds = ((struct dsa_switch *)ps) - 1;

		if (mv88e6xxx_ppu_enable(ds) == 0)
			ps->ppu_disabled = 0;
		mutex_unlock(&ps->ppu_mutex);
	}
}

static void mv88e6xxx_ppu_reenable_timer(unsigned long _ps)
{
	struct mv88e6xxx_priv_state *ps = (void *)_ps;

	schedule_work(&ps->ppu_work);
}

static int mv88e6xxx_ppu_access_get(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->ppu_mutex);

	/* If the PHY polling unit is enabled, disable it so that
	 * we can access the PHY registers.  If it was already
	 * disabled, cancel the timer that is going to re-enable
	 * it.
	 */
	if (!ps->ppu_disabled) {
		ret = mv88e6xxx_ppu_disable(ds);
		if (ret < 0) {
			mutex_unlock(&ps->ppu_mutex);
			return ret;
		}
		ps->ppu_disabled = 1;
	} else {
		del_timer(&ps->ppu_timer);
		ret = 0;
	}

	return ret;
}

static void mv88e6xxx_ppu_access_put(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	/* Schedule a timer to re-enable the PHY polling unit. */
	mod_timer(&ps->ppu_timer, jiffies + msecs_to_jiffies(10));
	mutex_unlock(&ps->ppu_mutex);
}

void mv88e6xxx_ppu_state_init(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	mutex_init(&ps->ppu_mutex);
	INIT_WORK(&ps->ppu_work, mv88e6xxx_ppu_reenable_work);
	init_timer(&ps->ppu_timer);
	ps->ppu_timer.data = (unsigned long)ps;
	ps->ppu_timer.function = mv88e6xxx_ppu_reenable_timer;
}

int mv88e6xxx_phy_read_ppu(struct dsa_switch *ds, int addr, int regnum)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_read(ds, addr, regnum);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}

int mv88e6xxx_phy_write_ppu(struct dsa_switch *ds, int addr,
			    int regnum, u16 val)
{
	int ret;

	ret = mv88e6xxx_ppu_access_get(ds);
	if (ret >= 0) {
		ret = mv88e6xxx_reg_write(ds, addr, regnum, val);
		mv88e6xxx_ppu_access_put(ds);
	}

	return ret;
}
#endif

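/* The family helpers below map the product identifier read from the switch
 * (ps->id) to a chip family, so that later code can key feature differences
 * (VTU/STU handling, RGMII delay settings, statistics addressing) off the
 * family rather than individual device IDs.
 */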
static bool mv88e6xxx_6065_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6031:
	case PORT_SWITCH_ID_6061:
	case PORT_SWITCH_ID_6035:
	case PORT_SWITCH_ID_6065:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6095_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6092:
	case PORT_SWITCH_ID_6095:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6097_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6046:
	case PORT_SWITCH_ID_6085:
	case PORT_SWITCH_ID_6096:
	case PORT_SWITCH_ID_6097:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6165_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6123:
	case PORT_SWITCH_ID_6161:
	case PORT_SWITCH_ID_6165:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6185_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6121:
	case PORT_SWITCH_ID_6122:
	case PORT_SWITCH_ID_6152:
	case PORT_SWITCH_ID_6155:
	case PORT_SWITCH_ID_6182:
	case PORT_SWITCH_ID_6185:
	case PORT_SWITCH_ID_6108:
	case PORT_SWITCH_ID_6131:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6320_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6320:
	case PORT_SWITCH_ID_6321:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6351_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6171:
	case PORT_SWITCH_ID_6175:
	case PORT_SWITCH_ID_6350:
	case PORT_SWITCH_ID_6351:
		return true;
	}
	return false;
}

static bool mv88e6xxx_6352_family(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6172:
	case PORT_SWITCH_ID_6176:
	case PORT_SWITCH_ID_6240:
	case PORT_SWITCH_ID_6352:
		return true;
	}
	return false;
}

/* We expect the switch to perform auto negotiation if there is a real
 * phy. However, in the case of a fixed link phy, we force the port
 * settings from the fixed link settings.
 */
void mv88e6xxx_adjust_link(struct dsa_switch *ds, int port,
			   struct phy_device *phydev)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u32 reg;
	int ret;

	if (!phy_is_pseudo_fixed_link(phydev))
		return;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
	if (ret < 0)
		goto out;

	reg = ret & ~(PORT_PCS_CTRL_LINK_UP |
		      PORT_PCS_CTRL_FORCE_LINK |
		      PORT_PCS_CTRL_DUPLEX_FULL |
		      PORT_PCS_CTRL_FORCE_DUPLEX |
		      PORT_PCS_CTRL_UNFORCED);

	reg |= PORT_PCS_CTRL_FORCE_LINK;
	if (phydev->link)
		reg |= PORT_PCS_CTRL_LINK_UP;

	if (mv88e6xxx_6065_family(ds) && phydev->speed > SPEED_100)
		goto out;

	switch (phydev->speed) {
	case SPEED_1000:
		reg |= PORT_PCS_CTRL_1000;
		break;
	case SPEED_100:
		reg |= PORT_PCS_CTRL_100;
		break;
	case SPEED_10:
		reg |= PORT_PCS_CTRL_10;
		break;
	default:
		pr_info("Unknown speed");
		goto out;
	}

	reg |= PORT_PCS_CTRL_FORCE_DUPLEX;
	if (phydev->duplex == DUPLEX_FULL)
		reg |= PORT_PCS_CTRL_DUPLEX_FULL;

	if ((mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds)) &&
	    (port >= ps->num_ports - 2)) {
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
			reg |= PORT_PCS_CTRL_RGMII_DELAY_RXCLK;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
			reg |= PORT_PCS_CTRL_RGMII_DELAY_TXCLK;
		if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
			reg |= (PORT_PCS_CTRL_RGMII_DELAY_RXCLK |
				PORT_PCS_CTRL_RGMII_DELAY_TXCLK);
	}
	_mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_PCS_CTRL, reg);

out:
	mutex_unlock(&ps->smi_mutex);
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_stats_wait(struct dsa_switch *ds)
{
	int ret;
	int i;

	for (i = 0; i < 10; i++) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_OP);
		if ((ret & GLOBAL_STATS_OP_BUSY) == 0)
			return 0;
	}

	return -ETIMEDOUT;
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_stats_snapshot(struct dsa_switch *ds, int port)
{
	int ret;

	if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
		port = (port + 1) << 5;

	/* Snapshot the hardware statistics counters for this port. */
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_CAPTURE_PORT |
				   GLOBAL_STATS_OP_HIST_RX_TX | port);
	if (ret < 0)
		return ret;

	/* Wait for the snapshotting to complete. */
	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return ret;

	return 0;
}

/* Must be called with SMI mutex held */
static void _mv88e6xxx_stats_read(struct dsa_switch *ds, int stat, u32 *val)
{
	u32 _val;
	int ret;

	*val = 0;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_STATS_OP,
				   GLOBAL_STATS_OP_READ_CAPTURED |
				   GLOBAL_STATS_OP_HIST_RX_TX | stat);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_stats_wait(ds);
	if (ret < 0)
		return;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_32);
	if (ret < 0)
		return;

	_val = ret << 16;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_STATS_COUNTER_01);
	if (ret < 0)
		return;

	*val = _val | ret;
}

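/* Layout of the hardware statistics table below: each entry is the ethtool
 * string, the counter width in bytes, and the counter address. Addresses of
 * 0x100 and above are read from the per-port registers (at address - 0x100)
 * instead of the statistics unit; see _mv88e6xxx_get_ethtool_stat().
 */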
static struct mv88e6xxx_hw_stat mv88e6xxx_hw_stats[] = {
	{ "in_good_octets", 8, 0x00, },
	{ "in_bad_octets", 4, 0x02, },
	{ "in_unicast", 4, 0x04, },
	{ "in_broadcasts", 4, 0x06, },
	{ "in_multicasts", 4, 0x07, },
	{ "in_pause", 4, 0x16, },
	{ "in_undersize", 4, 0x18, },
	{ "in_fragments", 4, 0x19, },
	{ "in_oversize", 4, 0x1a, },
	{ "in_jabber", 4, 0x1b, },
	{ "in_rx_error", 4, 0x1c, },
	{ "in_fcs_error", 4, 0x1d, },
	{ "out_octets", 8, 0x0e, },
	{ "out_unicast", 4, 0x10, },
	{ "out_broadcasts", 4, 0x13, },
	{ "out_multicasts", 4, 0x12, },
	{ "out_pause", 4, 0x15, },
	{ "excessive", 4, 0x11, },
	{ "collisions", 4, 0x1e, },
	{ "deferred", 4, 0x05, },
	{ "single", 4, 0x14, },
	{ "multiple", 4, 0x17, },
	{ "out_fcs_error", 4, 0x03, },
	{ "late", 4, 0x1f, },
	{ "hist_64bytes", 4, 0x08, },
	{ "hist_65_127bytes", 4, 0x09, },
	{ "hist_128_255bytes", 4, 0x0a, },
	{ "hist_256_511bytes", 4, 0x0b, },
	{ "hist_512_1023bytes", 4, 0x0c, },
	{ "hist_1024_max_bytes", 4, 0x0d, },
	/* Not all devices have the following counters */
	{ "sw_in_discards", 4, 0x110, },
	{ "sw_in_filtered", 2, 0x112, },
	{ "sw_out_filtered", 2, 0x113, },
};

static bool have_sw_in_discards(struct dsa_switch *ds)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);

	switch (ps->id) {
	case PORT_SWITCH_ID_6095: case PORT_SWITCH_ID_6161:
	case PORT_SWITCH_ID_6165: case PORT_SWITCH_ID_6171:
	case PORT_SWITCH_ID_6172: case PORT_SWITCH_ID_6176:
	case PORT_SWITCH_ID_6182: case PORT_SWITCH_ID_6185:
	case PORT_SWITCH_ID_6352:
		return true;
	default:
		return false;
	}
}

static void _mv88e6xxx_get_strings(struct dsa_switch *ds,
				   int nr_stats,
				   struct mv88e6xxx_hw_stat *stats,
				   int port, uint8_t *data)
{
	int i;

	for (i = 0; i < nr_stats; i++) {
		memcpy(data + i * ETH_GSTRING_LEN,
		       stats[i].string, ETH_GSTRING_LEN);
	}
}

static uint64_t _mv88e6xxx_get_ethtool_stat(struct dsa_switch *ds,
					    int stat,
					    struct mv88e6xxx_hw_stat *stats,
					    int port)
{
	struct mv88e6xxx_hw_stat *s = stats + stat;
	u32 low;
	u32 high = 0;
	int ret;
	u64 value;

	if (s->reg >= 0x100) {
		ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
					  s->reg - 0x100);
		if (ret < 0)
			return UINT64_MAX;

		low = ret;
		if (s->sizeof_stat == 4) {
			ret = _mv88e6xxx_reg_read(ds, REG_PORT(port),
						  s->reg - 0x100 + 1);
			if (ret < 0)
				return UINT64_MAX;
			high = ret;
		}
	} else {
		_mv88e6xxx_stats_read(ds, s->reg, &low);
		if (s->sizeof_stat == 8)
			_mv88e6xxx_stats_read(ds, s->reg + 1, &high);
	}
	value = (((u64)high) << 16) | low;
	return value;
}

static void _mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
					 int nr_stats,
					 struct mv88e6xxx_hw_stat *stats,
					 int port, uint64_t *data)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;
	int i;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_stats_snapshot(ds, port);
	if (ret < 0) {
		mutex_unlock(&ps->smi_mutex);
		return;
	}

	/* Read each of the counters. */
	for (i = 0; i < nr_stats; i++)
		data[i] = _mv88e6xxx_get_ethtool_stat(ds, i, stats, port);

	mutex_unlock(&ps->smi_mutex);
}

/* All the statistics in the table */
void
mv88e6xxx_get_strings(struct dsa_switch *ds, int port, uint8_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
				       mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
				       mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_sset_count(struct dsa_switch *ds)
{
	if (have_sw_in_discards(ds))
		return ARRAY_SIZE(mv88e6xxx_hw_stats);
	return ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
}

void
mv88e6xxx_get_ethtool_stats(struct dsa_switch *ds,
			    int port, uint64_t *data)
{
	if (have_sw_in_discards(ds))
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats),
			mv88e6xxx_hw_stats, port, data);
	else
		_mv88e6xxx_get_ethtool_stats(
			ds, ARRAY_SIZE(mv88e6xxx_hw_stats) - 3,
			mv88e6xxx_hw_stats, port, data);
}

int mv88e6xxx_get_regs_len(struct dsa_switch *ds, int port)
{
	return 32 * sizeof(u16);
}

void mv88e6xxx_get_regs(struct dsa_switch *ds, int port,
			struct ethtool_regs *regs, void *_p)
{
	u16 *p = _p;
	int i;

	regs->version = 0;

	memset(p, 0xff, 32 * sizeof(u16));

	for (i = 0; i < 32; i++) {
		int ret;

		ret = mv88e6xxx_reg_read(ds, REG_PORT(port), i);
		if (ret >= 0)
			p[i] = ret;
	}
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset,
			   u16 mask)
{
	unsigned long timeout = jiffies + HZ / 10;

	while (time_before(jiffies, timeout)) {
		int ret;

		ret = _mv88e6xxx_reg_read(ds, reg, offset);
		if (ret < 0)
			return ret;
		if (!(ret & mask))
			return 0;

		usleep_range(1000, 2000);
	}
	return -ETIMEDOUT;
}

static int mv88e6xxx_wait(struct dsa_switch *ds, int reg, int offset, u16 mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret;

	mutex_lock(&ps->smi_mutex);
	ret = _mv88e6xxx_wait(ds, reg, offset, mask);
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_phy_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
			       GLOBAL2_SMI_OP_BUSY);
}

int mv88e6xxx_eeprom_load_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_LOAD);
}

int mv88e6xxx_eeprom_busy_wait(struct dsa_switch *ds)
{
	return mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_EEPROM_OP,
			      GLOBAL2_EEPROM_OP_BUSY);
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_atu_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_ATU_OP,
			       GLOBAL_ATU_OP_BUSY);
}

/* Must be called with SMI lock held */
static int _mv88e6xxx_scratch_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
			       GLOBAL2_SCRATCH_BUSY);
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int addr,
					int regnum)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_READ | (addr << 5) |
				   regnum);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_phy_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_reg_read(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA);
}

/* Must be called with SMI mutex held */
static int _mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int addr,
					 int regnum, u16 val)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_DATA, val);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL2, GLOBAL2_SMI_OP,
				   GLOBAL2_SMI_OP_22_WRITE | (addr << 5) |
				   regnum);

	return _mv88e6xxx_phy_wait(ds);
}

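/* EEE (Energy Efficient Ethernet) support. The enable bits live in register
 * 16 of the port's internal PHY (bit 9: EEE enabled, bit 8: Tx LPI enabled),
 * accessed through the indirect PHY helpers above, while the active state is
 * reported by the port status register.
 */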
int mv88e6xxx_get_eee(struct dsa_switch *ds, int port, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (reg < 0)
		goto out;

	e->eee_enabled = !!(reg & 0x0200);
	e->tx_lpi_enabled = !!(reg & 0x0100);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_STATUS);
	if (reg < 0)
		goto out;

	e->eee_active = !!(reg & PORT_STATUS_EEE);
	reg = 0;

out:
	mutex_unlock(&ps->smi_mutex);
	return reg;
}

int mv88e6xxx_set_eee(struct dsa_switch *ds, int port,
		      struct phy_device *phydev, struct ethtool_eee *e)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg;
	int ret;

	mutex_lock(&ps->smi_mutex);

	ret = _mv88e6xxx_phy_read_indirect(ds, port, 16);
	if (ret < 0)
		goto out;

	reg = ret & ~0x0300;
	if (e->eee_enabled)
		reg |= 0x0200;
	if (e->tx_lpi_enabled)
		reg |= 0x0100;

	ret = _mv88e6xxx_phy_write_indirect(ds, port, 16, reg);
out:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

static int _mv88e6xxx_atu_cmd(struct dsa_switch *ds, u16 cmd)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_OP, cmd);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_atu_wait(ds);
}

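/* Pack an ATU entry's state and its port vector or trunk ID into the
 * GLOBAL_ATU_DATA register format used by the load, flush and move
 * operations below.
 */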
static int _mv88e6xxx_atu_data_write(struct dsa_switch *ds,
				     struct mv88e6xxx_atu_entry *entry)
{
	u16 data = entry->state & GLOBAL_ATU_DATA_STATE_MASK;

	if (entry->state != GLOBAL_ATU_DATA_STATE_UNUSED) {
		unsigned int mask, shift;

		if (entry->trunk) {
			data |= GLOBAL_ATU_DATA_TRUNK;
			mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
			shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
		} else {
			mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
			shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
		}

		data |= (entry->portv_trunkid << shift) & mask;
	}

	return _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_DATA, data);
}

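/* Flush or move ATU entries. A non-zero FID restricts the operation to that
 * single address database; static entries are only affected when static_too
 * is set.
 */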
static int _mv88e6xxx_atu_flush_move(struct dsa_switch *ds,
				     struct mv88e6xxx_atu_entry *entry,
				     bool static_too)
{
	int op;
	int err;

	err = _mv88e6xxx_atu_wait(ds);
	if (err)
		return err;

	err = _mv88e6xxx_atu_data_write(ds, entry);
	if (err)
		return err;

	if (entry->fid) {
		err = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
					   entry->fid);
		if (err)
			return err;

		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL_DB :
			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC_DB;
	} else {
		op = static_too ? GLOBAL_ATU_OP_FLUSH_MOVE_ALL :
			GLOBAL_ATU_OP_FLUSH_MOVE_NON_STATIC;
	}

	return _mv88e6xxx_atu_cmd(ds, op);
}

static int _mv88e6xxx_atu_flush(struct dsa_switch *ds, u16 fid, bool static_too)
{
	struct mv88e6xxx_atu_entry entry = {
		.fid = fid,
		.state = 0, /* EntryState bits must be 0 */
	};

	return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
}

static int _mv88e6xxx_flush_fid(struct dsa_switch *ds, int fid)
{
	return _mv88e6xxx_atu_flush(ds, fid, false);
}

static int _mv88e6xxx_atu_move(struct dsa_switch *ds, u16 fid, int from_port,
			       int to_port, bool static_too)
{
	struct mv88e6xxx_atu_entry entry = {
		.trunk = false,
		.fid = fid,
	};

	/* EntryState bits must be 0xF */
	entry.state = GLOBAL_ATU_DATA_STATE_MASK;

	/* ToPort and FromPort are respectively in PortVec bits 7:4 and 3:0 */
	entry.portv_trunkid = (to_port & 0x0f) << 4;
	entry.portv_trunkid |= from_port & 0x0f;

	return _mv88e6xxx_atu_flush_move(ds, &entry, static_too);
}

static int _mv88e6xxx_atu_remove(struct dsa_switch *ds, u16 fid, int port,
				 bool static_too)
{
	/* Destination port 0xF means remove the entries */
	return _mv88e6xxx_atu_move(ds, fid, port, 0x0f, static_too);
}

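/* Change the STP state of a port. When a port leaves the Learning or
 * Forwarding state, its dynamically learned addresses are removed from the
 * ATU before the new state is written.
 */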
static int mv88e6xxx_set_port_state(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int reg, ret = 0;
	u8 oldstate;

	mutex_lock(&ps->smi_mutex);

	reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_CONTROL);
	if (reg < 0) {
		ret = reg;
		goto abort;
	}

	oldstate = reg & PORT_CONTROL_STATE_MASK;
	if (oldstate != state) {
		/* Flush forwarding database if we're moving a port
		 * from Learning or Forwarding state to Disabled or
		 * Blocking or Listening state.
		 */
		if (oldstate >= PORT_CONTROL_STATE_LEARNING &&
		    state <= PORT_CONTROL_STATE_BLOCKING) {
			ret = _mv88e6xxx_atu_remove(ds, 0, port, false);
			if (ret)
				goto abort;
		}
		reg = (reg & ~PORT_CONTROL_STATE_MASK) | state;
		ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL,
					   reg);
	}

abort:
	mutex_unlock(&ps->smi_mutex);
	return ret;
}

/* Must be called with smi lock held */
static int _mv88e6xxx_update_port_config(struct dsa_switch *ds, int port)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid = ps->fid[port];
	u16 reg = fid << 12;

	if (dsa_is_cpu_port(ds, port))
		reg |= ds->phys_port_mask;
	else
		reg |= (ps->bridge_mask[fid] |
			(1 << dsa_upstream_port(ds))) & ~(1 << port);

	return _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_BASE_VLAN, reg);
}

/* Must be called with smi lock held */
static int _mv88e6xxx_update_bridge_config(struct dsa_switch *ds, int fid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int port;
	u32 mask;
	int ret;

	mask = ds->phys_port_mask;
	while (mask) {
		port = __ffs(mask);
		mask &= ~(1 << port);
		if (ps->fid[port] != fid)
			continue;

		ret = _mv88e6xxx_update_port_config(ds, port);
		if (ret)
			return ret;
	}

	return _mv88e6xxx_flush_fid(ds, fid);
}

/* Bridge handling functions */

int mv88e6xxx_join_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int ret = 0;
	u32 nmask;
	int fid;

	/* If the bridge group is not empty, join that group.
	 * Otherwise create a new group.
	 */
	fid = ps->fid[port];
	nmask = br_port_mask & ~(1 << port);
	if (nmask)
		fid = ps->fid[__ffs(nmask)];

	nmask = ps->bridge_mask[fid] | (1 << port);
	if (nmask != br_port_mask) {
		netdev_err(ds->ports[port],
			   "join: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
			   fid, br_port_mask, nmask);
		return -EINVAL;
	}

	mutex_lock(&ps->smi_mutex);

	ps->bridge_mask[fid] = br_port_mask;

	if (fid != ps->fid[port]) {
		clear_bit(ps->fid[port], ps->fid_bitmap);
		ps->fid[port] = fid;
		ret = _mv88e6xxx_update_bridge_config(ds, fid);
	}

	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u8 fid, newfid;
	int ret;

	fid = ps->fid[port];

	if (ps->bridge_mask[fid] != br_port_mask) {
		netdev_err(ds->ports[port],
			   "leave: Bridge port mask mismatch fid=%d mask=0x%x expected 0x%x\n",
			   fid, br_port_mask, ps->bridge_mask[fid]);
		return -EINVAL;
	}

	/* If the port was the last port of a bridge, we are done.
	 * Otherwise assign a new fid to the port, and fix up
	 * the bridge configuration.
	 */
	if (br_port_mask == (1 << port))
		return 0;

	mutex_lock(&ps->smi_mutex);

	newfid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID, 1);
	if (unlikely(newfid > ps->num_ports)) {
		netdev_err(ds->ports[port], "all first %d FIDs are used\n",
			   ps->num_ports);
		ret = -ENOSPC;
		goto unlock;
	}

	ps->fid[port] = newfid;
	set_bit(newfid, ps->fid_bitmap);
	ps->bridge_mask[fid] &= ~(1 << port);
	ps->bridge_mask[newfid] = 1 << port;

	ret = _mv88e6xxx_update_bridge_config(ds, fid);
	if (!ret)
		ret = _mv88e6xxx_update_bridge_config(ds, newfid);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return ret;
}

int mv88e6xxx_port_stp_update(struct dsa_switch *ds, int port, u8 state)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	int stp_state;

	switch (state) {
	case BR_STATE_DISABLED:
		stp_state = PORT_CONTROL_STATE_DISABLED;
		break;
	case BR_STATE_BLOCKING:
	case BR_STATE_LISTENING:
		stp_state = PORT_CONTROL_STATE_BLOCKING;
		break;
	case BR_STATE_LEARNING:
		stp_state = PORT_CONTROL_STATE_LEARNING;
		break;
	case BR_STATE_FORWARDING:
	default:
		stp_state = PORT_CONTROL_STATE_FORWARDING;
		break;
	}

	netdev_dbg(ds->ports[port], "port state %d [%d]\n", state, stp_state);

	/* mv88e6xxx_port_stp_update may be called with softirqs disabled,
	 * so we can not update the port state directly but need to schedule it.
	 */
	ps->port_state[port] = stp_state;
	set_bit(port, &ps->port_state_update_mask);
	schedule_work(&ps->bridge_work);

	return 0;
}

int mv88e6xxx_port_pvid_get(struct dsa_switch *ds, int port, u16 *pvid)
{
	int ret;

	ret = mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_DEFAULT_VLAN);
	if (ret < 0)
		return ret;

	*pvid = ret & PORT_DEFAULT_VLAN_MASK;

	return 0;
}

int mv88e6xxx_port_pvid_set(struct dsa_switch *ds, int port, u16 pvid)
{
	return mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
				   pvid & PORT_DEFAULT_VLAN_MASK);
}

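/* VLAN Table Unit (VTU) and STU access. Every command is written to the
 * GLOBAL_VTU_OP register and completion is detected by polling its busy bit.
 */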
static int _mv88e6xxx_vtu_wait(struct dsa_switch *ds)
{
	return _mv88e6xxx_wait(ds, REG_GLOBAL, GLOBAL_VTU_OP,
			       GLOBAL_VTU_OP_BUSY);
}

static int _mv88e6xxx_vtu_cmd(struct dsa_switch *ds, u16 op)
{
	int ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_OP, op);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_wait(ds);
}

static int _mv88e6xxx_vtu_stu_flush(struct dsa_switch *ds)
{
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_FLUSH_ALL);
}

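/* The VTU and STU share the GLOBAL_VTU_DATA registers: each port owns a 4-bit
 * field, four ports per 16-bit register. The nibble_offset argument selects
 * the VTU member-tag bits (0) or the STU port-state bits (2) within that
 * field.
 */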
static int _mv88e6xxx_vtu_stu_data_read(struct dsa_switch *ds,
					struct mv88e6xxx_vtu_stu_entry *entry,
					unsigned int nibble_offset)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 regs[3];
	int i;
	int ret;

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
					  GLOBAL_VTU_DATA_0_3 + i);
		if (ret < 0)
			return ret;

		regs[i] = ret;
	}

	for (i = 0; i < ps->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u16 reg = regs[i / 4];

		entry->data[i] = (reg >> shift) & GLOBAL_VTU_STU_DATA_MASK;
	}

	return 0;
}

static int _mv88e6xxx_vtu_stu_data_write(struct dsa_switch *ds,
					 struct mv88e6xxx_vtu_stu_entry *entry,
					 unsigned int nibble_offset)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	u16 regs[3] = { 0 };
	int i;
	int ret;

	for (i = 0; i < ps->num_ports; ++i) {
		unsigned int shift = (i % 4) * 4 + nibble_offset;
		u8 data = entry->data[i];

		regs[i / 4] |= (data & GLOBAL_VTU_STU_DATA_MASK) << shift;
	}

	for (i = 0; i < 3; ++i) {
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL,
					   GLOBAL_VTU_DATA_0_3 + i, regs[i]);
		if (ret < 0)
			return ret;
	}

	return 0;
}

static int _mv88e6xxx_vtu_getnext(struct dsa_switch *ds, u16 vid,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID,
				   vid & GLOBAL_VTU_VID_MASK);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.vid = ret & GLOBAL_VTU_VID_MASK;
	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 0);
		if (ret < 0)
			return ret;

		if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
		    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
						  GLOBAL_VTU_FID);
			if (ret < 0)
				return ret;

			next.fid = ret & GLOBAL_VTU_FID_MASK;

			ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
						  GLOBAL_VTU_SID);
			if (ret < 0)
				return ret;

			next.sid = ret & GLOBAL_VTU_SID_MASK;
		}
	}

	*entry = next;
	return 0;
}

static int _mv88e6xxx_vtu_loadpurge(struct dsa_switch *ds,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port member tags */
	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 0);
	if (ret < 0)
		return ret;

	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
		reg = entry->sid & GLOBAL_VTU_SID_MASK;
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
		if (ret < 0)
			return ret;

		reg = entry->fid & GLOBAL_VTU_FID_MASK;
		ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_FID, reg);
		if (ret < 0)
			return ret;
	}

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	reg |= entry->vid & GLOBAL_VTU_VID_MASK;
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_VTU_LOAD_PURGE);
}

static int _mv88e6xxx_stu_getnext(struct dsa_switch *ds, u8 sid,
				  struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_vtu_stu_entry next = { 0 };
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID,
				   sid & GLOBAL_VTU_SID_MASK);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_GET_NEXT);
	if (ret < 0)
		return ret;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_SID);
	if (ret < 0)
		return ret;

	next.sid = ret & GLOBAL_VTU_SID_MASK;

	ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_VTU_VID);
	if (ret < 0)
		return ret;

	next.valid = !!(ret & GLOBAL_VTU_VID_VALID);

	if (next.valid) {
		ret = _mv88e6xxx_vtu_stu_data_read(ds, &next, 2);
		if (ret < 0)
			return ret;
	}

	*entry = next;
	return 0;
}

static int _mv88e6xxx_stu_loadpurge(struct dsa_switch *ds,
				    struct mv88e6xxx_vtu_stu_entry *entry)
{
	u16 reg = 0;
	int ret;

	ret = _mv88e6xxx_vtu_wait(ds);
	if (ret < 0)
		return ret;

	if (!entry->valid)
		goto loadpurge;

	/* Write port states */
	ret = _mv88e6xxx_vtu_stu_data_write(ds, entry, 2);
	if (ret < 0)
		return ret;

	reg = GLOBAL_VTU_VID_VALID;
loadpurge:
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_VID, reg);
	if (ret < 0)
		return ret;

	reg = entry->sid & GLOBAL_VTU_SID_MASK;
	ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_VTU_SID, reg);
	if (ret < 0)
		return ret;

	return _mv88e6xxx_vtu_cmd(ds, GLOBAL_VTU_OP_STU_LOAD_PURGE);
}

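/* Initialise a new VTU entry for the given VID: all ports except the CPU
 * port start out as non-members, and on chips with an STU a FID above the
 * per-port range is allocated and its address database is flushed.
 */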
static int _mv88e6xxx_vlan_init(struct dsa_switch *ds, u16 vid,
				struct mv88e6xxx_vtu_stu_entry *entry)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan = {
		.valid = true,
		.vid = vid,
	};
	int i;

	/* exclude all ports except the CPU */
	for (i = 0; i < ps->num_ports; ++i)
		vlan.data[i] = dsa_is_cpu_port(ds, i) ?
			GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED :
			GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	if (mv88e6xxx_6097_family(ds) || mv88e6xxx_6165_family(ds) ||
	    mv88e6xxx_6351_family(ds) || mv88e6xxx_6352_family(ds)) {
		struct mv88e6xxx_vtu_stu_entry vstp;
		int err;

		/* Adding a VTU entry requires a valid STU entry. As VSTP is not
		 * implemented, only one STU entry is needed to cover all VTU
		 * entries. Thus, validate the SID 0.
		 */
		vlan.sid = 0;
		err = _mv88e6xxx_stu_getnext(ds, GLOBAL_VTU_SID_MASK, &vstp);
		if (err)
			return err;

		if (vstp.sid != vlan.sid || !vstp.valid) {
			memset(&vstp, 0, sizeof(vstp));
			vstp.valid = true;
			vstp.sid = vlan.sid;

			err = _mv88e6xxx_stu_loadpurge(ds, &vstp);
			if (err)
				return err;
		}

		/* Non-bridged ports and bridge groups use FIDs from 1 to
		 * num_ports; VLANs use FIDs from num_ports+1 to 4095.
		 */
		vlan.fid = find_next_zero_bit(ps->fid_bitmap, VLAN_N_VID,
					      ps->num_ports + 1);
		if (unlikely(vlan.fid == VLAN_N_VID)) {
			pr_err("no more FID available for VLAN %d\n", vid);
			return -ENOSPC;
		}

		/* Clear all MAC addresses from the new database */
		err = _mv88e6xxx_atu_flush(ds, vlan.fid, true);
		if (err)
			return err;

		set_bit(vlan.fid, ps->fid_bitmap);
	}

	*entry = vlan;
	return 0;
}

int mv88e6xxx_port_vlan_add(struct dsa_switch *ds, int port, u16 vid,
			    bool untagged)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	int err;

	mutex_lock(&ps->smi_mutex);
	err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
	if (err)
		goto unlock;

	if (vlan.vid != vid || !vlan.valid) {
		err = _mv88e6xxx_vlan_init(ds, vid, &vlan);
		if (err)
			goto unlock;
	}

	vlan.data[port] = untagged ?
		GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED :
		GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED;

	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

int mv88e6xxx_port_vlan_del(struct dsa_switch *ds, int port, u16 vid)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry vlan;
	bool keep = false;
	int i, err;

	mutex_lock(&ps->smi_mutex);

	err = _mv88e6xxx_vtu_getnext(ds, vid - 1, &vlan);
	if (err)
		goto unlock;

	if (vlan.vid != vid || !vlan.valid ||
	    vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
		err = -ENOENT;
		goto unlock;
	}

	vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;

	/* keep the VLAN unless all ports are excluded */
	for (i = 0; i < ps->num_ports; ++i) {
		if (dsa_is_cpu_port(ds, i))
			continue;

		if (vlan.data[i] != GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER) {
			keep = true;
			break;
		}
	}

	vlan.valid = keep;
	err = _mv88e6xxx_vtu_loadpurge(ds, &vlan);
	if (err)
		goto unlock;

	err = _mv88e6xxx_atu_remove(ds, vlan.fid, port, false);
	if (err)
		goto unlock;

	if (!keep)
		clear_bit(vlan.fid, ps->fid_bitmap);

unlock:
	mutex_unlock(&ps->smi_mutex);

	return err;
}

static int _mv88e6xxx_port_vtu_getnext(struct dsa_switch *ds, int port, u16 vid,
				       struct mv88e6xxx_vtu_stu_entry *entry)
{
	int err;

	do {
		if (vid == 4095)
			return -ENOENT;

		err = _mv88e6xxx_vtu_getnext(ds, vid, entry);
		if (err)
			return err;

		if (!entry->valid)
			return -ENOENT;

		vid = entry->vid;
	} while (entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED &&
		 entry->data[port] != GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED);

	return 0;
}

int mv88e6xxx_vlan_getnext(struct dsa_switch *ds, u16 *vid,
			   unsigned long *ports, unsigned long *untagged)
{
	struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
	struct mv88e6xxx_vtu_stu_entry next;
	int port;
	int err;

	if (*vid == 4095)
		return -ENOENT;

	mutex_lock(&ps->smi_mutex);
	err = _mv88e6xxx_vtu_getnext(ds, *vid, &next);
	mutex_unlock(&ps->smi_mutex);

	if (err)
		return err;

	if (!next.valid)
		return -ENOENT;

	*vid = next.vid;

	for (port = 0; port < ps->num_ports; ++port) {
		clear_bit(port, ports);
		clear_bit(port, untagged);

		if (dsa_is_cpu_port(ds, port))
			continue;

		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_TAGGED ||
		    next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
			set_bit(port, ports);

		if (next.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_UNTAGGED)
			set_bit(port, untagged);
	}

	return 0;
}

Vivien Didelotc5723ac2015-08-10 09:09:48 -04001748static int _mv88e6xxx_atu_mac_write(struct dsa_switch *ds,
1749 const unsigned char *addr)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001750{
1751 int i, ret;
1752
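	/* The ATU MAC is spread over three 16-bit registers, high byte
	 * first: 00:11:22:33:44:55, for example, is written as 0x0011,
	 * 0x2233 and 0x4455 starting at GLOBAL_ATU_MAC_01.
	 */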
1753 for (i = 0; i < 3; i++) {
Andrew Lunncca8b132015-04-02 04:06:39 +02001754 ret = _mv88e6xxx_reg_write(
1755 ds, REG_GLOBAL, GLOBAL_ATU_MAC_01 + i,
1756 (addr[i * 2] << 8) | addr[i * 2 + 1]);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001757 if (ret < 0)
1758 return ret;
1759 }
1760
1761 return 0;
1762}
1763
Vivien Didelotc5723ac2015-08-10 09:09:48 -04001764static int _mv88e6xxx_atu_mac_read(struct dsa_switch *ds, unsigned char *addr)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001765{
1766 int i, ret;
1767
1768 for (i = 0; i < 3; i++) {
Andrew Lunncca8b132015-04-02 04:06:39 +02001769 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL,
1770 GLOBAL_ATU_MAC_01 + i);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001771 if (ret < 0)
1772 return ret;
1773 addr[i * 2] = ret >> 8;
1774 addr[i * 2 + 1] = ret & 0xff;
1775 }
1776
1777 return 0;
1778}
1779
Vivien Didelotfd231c82015-08-10 09:09:50 -04001780static int _mv88e6xxx_atu_load(struct dsa_switch *ds,
1781 struct mv88e6xxx_atu_entry *entry)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001782{
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001783 int ret;
1784
1785 ret = _mv88e6xxx_atu_wait(ds);
1786 if (ret < 0)
1787 return ret;
1788
Vivien Didelotfd231c82015-08-10 09:09:50 -04001789 ret = _mv88e6xxx_atu_mac_write(ds, entry->mac);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001790 if (ret < 0)
1791 return ret;
1792
Vivien Didelot37705b72015-09-04 14:34:11 -04001793 ret = _mv88e6xxx_atu_data_write(ds, entry);
Vivien Didelotfd231c82015-08-10 09:09:50 -04001794 if (ret < 0)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001795 return ret;
1796
Vivien Didelot70cc99d2015-09-04 14:34:10 -04001797 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, entry->fid);
1798 if (ret < 0)
1799 return ret;
1800
1801 return _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_LOAD_DB);
Vivien Didelotfd231c82015-08-10 09:09:50 -04001802}
David S. Millercdf09692015-08-11 12:00:37 -07001803
Vivien Didelotfd231c82015-08-10 09:09:50 -04001804static int _mv88e6xxx_port_vid_to_fid(struct dsa_switch *ds, int port, u16 vid)
1805{
1806 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot02512b62015-08-13 12:52:20 -04001807 struct mv88e6xxx_vtu_stu_entry vlan;
1808 int err;
Vivien Didelotfd231c82015-08-10 09:09:50 -04001809
1810 if (vid == 0)
1811 return ps->fid[port];
1812
Vivien Didelot02512b62015-08-13 12:52:20 -04001813 err = _mv88e6xxx_port_vtu_getnext(ds, port, vid - 1, &vlan);
1814 if (err)
1815 return err;
1816
1817 if (vlan.vid == vid)
1818 return vlan.fid;
1819
Vivien Didelotfd231c82015-08-10 09:09:50 -04001820 return -ENOENT;
1821}
1822
1823static int _mv88e6xxx_port_fdb_load(struct dsa_switch *ds, int port,
1824 const unsigned char *addr, u16 vid,
1825 u8 state)
1826{
1827 struct mv88e6xxx_atu_entry entry = { 0 };
1828 int ret;
1829
1830 ret = _mv88e6xxx_port_vid_to_fid(ds, port, vid);
1831 if (ret < 0)
1832 return ret;
1833
1834 entry.fid = ret;
1835 entry.state = state;
1836 ether_addr_copy(entry.mac, addr);
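	/* A state of UNUSED purges the entry; any other state installs a
	 * non-trunk entry whose port vector contains only this port.
	 */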
1837 if (state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1838 entry.trunk = false;
1839 entry.portv_trunkid = BIT(port);
1840 }
1841
1842 return _mv88e6xxx_atu_load(ds, &entry);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001843}
1844
Vivien Didelot146a3202015-10-08 11:35:12 -04001845int mv88e6xxx_port_fdb_prepare(struct dsa_switch *ds, int port,
1846 const struct switchdev_obj_port_fdb *fdb,
1847 struct switchdev_trans *trans)
1848{
1849 /* We don't need any dynamic resources from the kernel (yet),
1850 * so skip the prepare phase.
1851 */
1852 return 0;
1853}
1854
David S. Millercdf09692015-08-11 12:00:37 -07001855int mv88e6xxx_port_fdb_add(struct dsa_switch *ds, int port,
Vivien Didelot1f36faf2015-10-08 11:35:13 -04001856 const struct switchdev_obj_port_fdb *fdb,
1857 struct switchdev_trans *trans)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001858{
Vivien Didelot1f36faf2015-10-08 11:35:13 -04001859 int state = is_multicast_ether_addr(fdb->addr) ?
David S. Millercdf09692015-08-11 12:00:37 -07001860 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1861 GLOBAL_ATU_DATA_STATE_UC_STATIC;
1862 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot6630e232015-08-06 01:44:07 -04001863 int ret;
1864
David S. Millercdf09692015-08-11 12:00:37 -07001865 mutex_lock(&ps->smi_mutex);
Vivien Didelot1f36faf2015-10-08 11:35:13 -04001866 ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid, state);
David S. Millercdf09692015-08-11 12:00:37 -07001867 mutex_unlock(&ps->smi_mutex);
1868
1869 return ret;
1870}
1871
1872int mv88e6xxx_port_fdb_del(struct dsa_switch *ds, int port,
Vivien Didelot8057b3e2015-10-08 11:35:14 -04001873 const struct switchdev_obj_port_fdb *fdb)
David S. Millercdf09692015-08-11 12:00:37 -07001874{
1875 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
1876 int ret;
1877
1878 mutex_lock(&ps->smi_mutex);
Vivien Didelot8057b3e2015-10-08 11:35:14 -04001879 ret = _mv88e6xxx_port_fdb_load(ds, port, fdb->addr, fdb->vid,
David S. Millercdf09692015-08-11 12:00:37 -07001880 GLOBAL_ATU_DATA_STATE_UNUSED);
1881 mutex_unlock(&ps->smi_mutex);
1882
1883 return ret;
1884}
1885
Vivien Didelot1d194042015-08-10 09:09:51 -04001886static int _mv88e6xxx_atu_getnext(struct dsa_switch *ds, u16 fid,
1887 const unsigned char *addr,
1888 struct mv88e6xxx_atu_entry *entry)
David S. Millercdf09692015-08-11 12:00:37 -07001889{
Vivien Didelot1d194042015-08-10 09:09:51 -04001890 struct mv88e6xxx_atu_entry next = { 0 };
1891 int ret;
1892
1893 next.fid = fid;
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001894
1895 ret = _mv88e6xxx_atu_wait(ds);
1896 if (ret < 0)
1897 return ret;
1898
Vivien Didelotc5723ac2015-08-10 09:09:48 -04001899 ret = _mv88e6xxx_atu_mac_write(ds, addr);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001900 if (ret < 0)
1901 return ret;
1902
Vivien Didelot70cc99d2015-09-04 14:34:10 -04001903 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID, fid);
1904 if (ret < 0)
1905 return ret;
1906
1907 ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001908 if (ret < 0)
1909 return ret;
1910
Vivien Didelot1d194042015-08-10 09:09:51 -04001911 ret = _mv88e6xxx_atu_mac_read(ds, next.mac);
1912 if (ret < 0)
1913 return ret;
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001914
Vivien Didelot1d194042015-08-10 09:09:51 -04001915 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
1916 if (ret < 0)
1917 return ret;
1918
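	/* The data word carries the entry state plus either a port vector
	 * or, when GLOBAL_ATU_DATA_TRUNK is set, a trunk ID, so pick the
	 * mask and shift accordingly.
	 */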
1919 next.state = ret & GLOBAL_ATU_DATA_STATE_MASK;
1920 if (next.state != GLOBAL_ATU_DATA_STATE_UNUSED) {
1921 unsigned int mask, shift;
1922
1923 if (ret & GLOBAL_ATU_DATA_TRUNK) {
1924 next.trunk = true;
1925 mask = GLOBAL_ATU_DATA_TRUNK_ID_MASK;
1926 shift = GLOBAL_ATU_DATA_TRUNK_ID_SHIFT;
1927 } else {
1928 next.trunk = false;
1929 mask = GLOBAL_ATU_DATA_PORT_VECTOR_MASK;
1930 shift = GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT;
1931 }
1932
1933 next.portv_trunkid = (ret & mask) >> shift;
1934 }
1935
1936 *entry = next;
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001937 return 0;
1938}
1939
David S. Millercdf09692015-08-11 12:00:37 -07001940/* Get the next FDB entry visible to this port. */
1941int mv88e6xxx_port_fdb_getnext(struct dsa_switch *ds, int port,
Vivien Didelot2a778e12015-08-10 09:09:49 -04001942 unsigned char *addr, u16 *vid, bool *is_static)
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001943{
1944 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot1d194042015-08-10 09:09:51 -04001945 struct mv88e6xxx_atu_entry next;
1946 u16 fid;
Vivien Didelot87820512015-08-06 01:44:08 -04001947 int ret;
1948
1949 mutex_lock(&ps->smi_mutex);
Vivien Didelot1d194042015-08-10 09:09:51 -04001950
1951 ret = _mv88e6xxx_port_vid_to_fid(ds, port, *vid);
1952 if (ret < 0)
1953 goto unlock;
1954 fid = ret;
1955
1956 do {
1957 if (is_broadcast_ether_addr(addr)) {
Vivien Didelot02512b62015-08-13 12:52:20 -04001958 struct mv88e6xxx_vtu_stu_entry vtu;
1959
1960 ret = _mv88e6xxx_port_vtu_getnext(ds, port, *vid, &vtu);
1961 if (ret < 0)
1962 goto unlock;
1963
1964 *vid = vtu.vid;
1965 fid = vtu.fid;
Vivien Didelot1d194042015-08-10 09:09:51 -04001966 }
1967
1968 ret = _mv88e6xxx_atu_getnext(ds, fid, addr, &next);
1969 if (ret < 0)
1970 goto unlock;
1971
1972 ether_addr_copy(addr, next.mac);
1973
1974 if (next.state == GLOBAL_ATU_DATA_STATE_UNUSED)
1975 continue;
1976 } while (next.trunk || (next.portv_trunkid & BIT(port)) == 0);
1977
1978 *is_static = next.state == (is_multicast_ether_addr(addr) ?
1979 GLOBAL_ATU_DATA_STATE_MC_STATIC :
1980 GLOBAL_ATU_DATA_STATE_UC_STATIC);
1981unlock:
Guenter Roeckdefb05b2015-03-26 18:36:38 -07001982 mutex_unlock(&ps->smi_mutex);
1983
1984 return ret;
1985}
1986
Guenter Roeckfacd95b2015-03-26 18:36:35 -07001987static void mv88e6xxx_bridge_work(struct work_struct *work)
1988{
1989 struct mv88e6xxx_priv_state *ps;
1990 struct dsa_switch *ds;
1991 int port;
1992
1993 ps = container_of(work, struct mv88e6xxx_priv_state, bridge_work);
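	/* The private state sits directly after struct dsa_switch in the
	 * same allocation (the inverse of ds_to_priv()), so stepping back
	 * one dsa_switch recovers ds.
	 */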
1994 ds = ((struct dsa_switch *)ps) - 1;
1995
1996 while (ps->port_state_update_mask) {
1997 port = __ffs(ps->port_state_update_mask);
1998 clear_bit(port, &ps->port_state_update_mask);
1999 mv88e6xxx_set_port_state(ds, port, ps->port_state[port]);
2000 }
2001}
2002
Andrew Lunndbde9e62015-05-06 01:09:48 +02002003static int mv88e6xxx_setup_port(struct dsa_switch *ds, int port)
Guenter Roeckd827e882015-03-26 18:36:29 -07002004{
2005 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Guenter Roeckfacd95b2015-03-26 18:36:35 -07002006 int ret, fid;
Andrew Lunn54d792f2015-05-06 01:09:47 +02002007 u16 reg;
Guenter Roeckd827e882015-03-26 18:36:29 -07002008
2009 mutex_lock(&ps->smi_mutex);
2010
Andrew Lunn54d792f2015-05-06 01:09:47 +02002011 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2012 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2013 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002014 mv88e6xxx_6065_family(ds) || mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002015 /* MAC Forcing register: don't force link, speed,
2016 * duplex or flow control state to any particular
2017 * values on physical ports, but force the CPU port
2018 * and all DSA ports to their maximum bandwidth and
2019 * full duplex.
2020 */
2021 reg = _mv88e6xxx_reg_read(ds, REG_PORT(port), PORT_PCS_CTRL);
Andrew Lunn60045cb2015-08-17 23:52:51 +02002022 if (dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)) {
Russell King53adc9e2015-09-21 21:42:59 +01002023 reg &= ~PORT_PCS_CTRL_UNFORCED;
Andrew Lunn54d792f2015-05-06 01:09:47 +02002024 reg |= PORT_PCS_CTRL_FORCE_LINK |
2025 PORT_PCS_CTRL_LINK_UP |
2026 PORT_PCS_CTRL_DUPLEX_FULL |
2027 PORT_PCS_CTRL_FORCE_DUPLEX;
2028 if (mv88e6xxx_6065_family(ds))
2029 reg |= PORT_PCS_CTRL_100;
2030 else
2031 reg |= PORT_PCS_CTRL_1000;
2032 } else {
2033 reg |= PORT_PCS_CTRL_UNFORCED;
2034 }
2035
2036 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2037 PORT_PCS_CTRL, reg);
2038 if (ret)
2039 goto abort;
2040 }
2041
2042 /* Port Control: disable Drop-on-Unlock, disable Drop-on-Lock,
2043 * disable Header mode, enable IGMP/MLD snooping, disable VLAN
2044 * tunneling, determine priority by looking at 802.1p and IP
2045 * priority fields (IP prio has precedence), and set STP state
2046 * to Forwarding.
2047 *
2048 * If this is the CPU link, use DSA or EDSA tagging depending
2049 * on which tagging mode was configured.
2050 *
2051 * If this is a link to another switch, use DSA tagging mode.
2052 *
2053 * If this is the upstream port for this switch, enable
2054 * forwarding of unknown unicasts and multicasts.
2055 */
2056 reg = 0;
2057 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2058 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2059 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002060 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds))
Andrew Lunn54d792f2015-05-06 01:09:47 +02002061 reg = PORT_CONTROL_IGMP_MLD_SNOOP |
2062 PORT_CONTROL_USE_TAG | PORT_CONTROL_USE_IP |
2063 PORT_CONTROL_STATE_FORWARDING;
2064 if (dsa_is_cpu_port(ds, port)) {
2065 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2066 reg |= PORT_CONTROL_DSA_TAG;
2067 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002068 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2069 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002070 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2071 reg |= PORT_CONTROL_FRAME_ETHER_TYPE_DSA;
2072 else
2073 reg |= PORT_CONTROL_FRAME_MODE_DSA;
Andrew Lunnc047a1f2015-09-29 01:50:56 +02002074 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2075 PORT_CONTROL_FORWARD_UNKNOWN_MC;
Andrew Lunn54d792f2015-05-06 01:09:47 +02002076 }
2077
2078 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2079 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2080 mv88e6xxx_6095_family(ds) || mv88e6xxx_6065_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002081 mv88e6xxx_6185_family(ds) || mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002082 if (ds->dst->tag_protocol == DSA_TAG_PROTO_EDSA)
2083 reg |= PORT_CONTROL_EGRESS_ADD_TAG;
2084 }
2085 }
Andrew Lunn6083ce72015-08-17 23:52:52 +02002086 if (dsa_is_dsa_port(ds, port)) {
2087 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds))
2088 reg |= PORT_CONTROL_DSA_TAG;
2089 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2090 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2091 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002092 reg |= PORT_CONTROL_FRAME_MODE_DSA;
Andrew Lunn6083ce72015-08-17 23:52:52 +02002093 }
2094
Andrew Lunn54d792f2015-05-06 01:09:47 +02002095 if (port == dsa_upstream_port(ds))
2096 reg |= PORT_CONTROL_FORWARD_UNKNOWN |
2097 PORT_CONTROL_FORWARD_UNKNOWN_MC;
2098 }
2099 if (reg) {
2100 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2101 PORT_CONTROL, reg);
2102 if (ret)
2103 goto abort;
2104 }
2105
Vivien Didelot8efdda42015-08-13 12:52:23 -04002106 /* Port Control 2: don't force a good FCS, set the maximum frame size to
2107 * 10240 bytes, put 802.1q in Fallback mode, don't discard tagged or
2108 * untagged frames on this port, do a destination address lookup on all
2109 * received packets as usual, disable ARP mirroring and don't send a
2110 * copy of all transmitted/received frames on this port to the CPU.
Andrew Lunn54d792f2015-05-06 01:09:47 +02002111 */
2112 reg = 0;
2113 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2114 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002115 mv88e6xxx_6095_family(ds) || mv88e6xxx_6320_family(ds))
Andrew Lunn54d792f2015-05-06 01:09:47 +02002116 reg = PORT_CONTROL_2_MAP_DA;
2117
2118 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002119 mv88e6xxx_6165_family(ds) || mv88e6xxx_6320_family(ds))
Andrew Lunn54d792f2015-05-06 01:09:47 +02002120 reg |= PORT_CONTROL_2_JUMBO_10240;
2121
2122 if (mv88e6xxx_6095_family(ds) || mv88e6xxx_6185_family(ds)) {
2123 /* Set the upstream port this port should use */
2124 reg |= dsa_upstream_port(ds);
2125 /* enable forwarding of unknown multicast addresses to
2126 * the upstream port
2127 */
2128 if (port == dsa_upstream_port(ds))
2129 reg |= PORT_CONTROL_2_FORWARD_UNKNOWN;
2130 }
2131
Vivien Didelotf5117ce2015-08-19 18:54:55 -04002132 reg |= PORT_CONTROL_2_8021Q_FALLBACK;
Vivien Didelot8efdda42015-08-13 12:52:23 -04002133
Andrew Lunn54d792f2015-05-06 01:09:47 +02002134 if (reg) {
2135 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2136 PORT_CONTROL_2, reg);
2137 if (ret)
2138 goto abort;
2139 }
2140
2141 /* Port Association Vector: when learning source addresses
2142 * of packets, add the address to the address database using
2143 * a port bitmap that has only the bit for this port set and
2144 * the other bits clear.
2145 */
2146 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_ASSOC_VECTOR,
2147 1 << port);
2148 if (ret)
2149 goto abort;
2150
2151 /* Egress rate control 2: disable egress rate control. */
2152 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_RATE_CONTROL_2,
2153 0x0000);
2154 if (ret)
2155 goto abort;
2156
2157 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002158 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2159 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002160 /* Do not limit the period of time that this port can
2161 * be paused for by the remote end or the period of
2162 * time that this port can pause the remote end.
2163 */
2164 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2165 PORT_PAUSE_CTRL, 0x0000);
2166 if (ret)
2167 goto abort;
2168
2169 /* Port ATU control: disable limiting the number of
2170 * address database entries that this port is allowed
2171 * to use.
2172 */
2173 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2174 PORT_ATU_CONTROL, 0x0000);
		if (ret)
			goto abort;

2175 /* Priority Override: disable DA, SA and VTU priority
2176 * override.
2177 */
2178 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2179 PORT_PRI_OVERRIDE, 0x0000);
2180 if (ret)
2181 goto abort;
2182
2183 /* Port Ethertype: use the Ethertype DSA Ethertype
2184 * value.
2185 */
2186 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2187 PORT_ETH_TYPE, ETH_P_EDSA);
2188 if (ret)
2189 goto abort;
2190 /* Tag Remap: use an identity 802.1p prio -> switch
2191 * prio mapping.
2192 */
2193 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2194 PORT_TAG_REGMAP_0123, 0x3210);
2195 if (ret)
2196 goto abort;
2197
2198 /* Tag Remap 2: use an identity 802.1p prio -> switch
2199 * prio mapping.
2200 */
2201 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2202 PORT_TAG_REGMAP_4567, 0x7654);
2203 if (ret)
2204 goto abort;
2205 }
2206
2207 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2208 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002209 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2210 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002211 /* Rate Control: disable ingress rate limiting. */
2212 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port),
2213 PORT_RATE_CONTROL, 0x0001);
2214 if (ret)
2215 goto abort;
2216 }
2217
Guenter Roeck366f0a02015-03-26 18:36:30 -07002218 /* Port Control 1: disable trunking, disable sending
2219 * learning messages to this port.
Guenter Roeckd827e882015-03-26 18:36:29 -07002220 */
Vivien Didelot614f03f2015-04-20 17:19:23 -04002221 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_CONTROL_1, 0x0000);
Guenter Roeckd827e882015-03-26 18:36:29 -07002222 if (ret)
2223 goto abort;
2224
2225 /* Port based VLAN map: give each port its own address
2226 * database, allow the CPU port to talk to each of the 'real'
2227 * ports, and allow each of the 'real' ports to only talk to
2228 * the upstream port.
2229 */
Vivien Didelot194fea72015-08-10 09:09:47 -04002230 fid = port + 1;
Guenter Roeckfacd95b2015-03-26 18:36:35 -07002231 ps->fid[port] = fid;
Vivien Didelot194fea72015-08-10 09:09:47 -04002232 set_bit(fid, ps->fid_bitmap);
Guenter Roeckd827e882015-03-26 18:36:29 -07002233
Guenter Roeckfacd95b2015-03-26 18:36:35 -07002234 if (!dsa_is_cpu_port(ds, port))
2235 ps->bridge_mask[fid] = 1 << port;
2236
2237 ret = _mv88e6xxx_update_port_config(ds, port);
Guenter Roeckd827e882015-03-26 18:36:29 -07002238 if (ret)
2239 goto abort;
2240
2241 /* Default VLAN ID and priority: don't set a default VLAN
2242 * ID, and set the default packet priority to zero.
2243 */
Vivien Didelot47cf1e652015-04-20 17:43:26 -04002244 ret = _mv88e6xxx_reg_write(ds, REG_PORT(port), PORT_DEFAULT_VLAN,
2245 0x0000);
Guenter Roeckd827e882015-03-26 18:36:29 -07002246abort:
2247 mutex_unlock(&ps->smi_mutex);
2248 return ret;
2249}
2250
Andrew Lunndbde9e62015-05-06 01:09:48 +02002251int mv88e6xxx_setup_ports(struct dsa_switch *ds)
2252{
2253 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2254 int ret;
2255 int i;
2256
2257 for (i = 0; i < ps->num_ports; i++) {
2258 ret = mv88e6xxx_setup_port(ds, i);
2259 if (ret < 0)
2260 return ret;
2261 }
2262 return 0;
2263}
2264
Andrew Lunn87c8cef2015-06-20 18:42:28 +02002265static int mv88e6xxx_regs_show(struct seq_file *s, void *p)
2266{
2267 struct dsa_switch *ds = s->private;
2268
2269 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2270 int reg, port;
2271
2272 seq_puts(s, " GLOBAL GLOBAL2 ");
2273 for (port = 0 ; port < ps->num_ports; port++)
2274 seq_printf(s, " %2d ", port);
2275 seq_puts(s, "\n");
2276
2277 for (reg = 0; reg < 32; reg++) {
2278 seq_printf(s, "%2x: ", reg);
2279 seq_printf(s, " %4x %4x ",
2280 mv88e6xxx_reg_read(ds, REG_GLOBAL, reg),
2281 mv88e6xxx_reg_read(ds, REG_GLOBAL2, reg));
2282
2283 for (port = 0 ; port < ps->num_ports; port++)
2284 seq_printf(s, "%4x ",
2285 mv88e6xxx_reg_read(ds, REG_PORT(port), reg));
2286 seq_puts(s, "\n");
2287 }
2288
2289 return 0;
2290}
2291
2292static int mv88e6xxx_regs_open(struct inode *inode, struct file *file)
2293{
2294 return single_open(file, mv88e6xxx_regs_show, inode->i_private);
2295}
2296
2297static const struct file_operations mv88e6xxx_regs_fops = {
2298 .open = mv88e6xxx_regs_open,
2299 .read = seq_read,
2300 .llseek = no_llseek,
2301 .release = single_release,
2302 .owner = THIS_MODULE,
2303};
2304
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002305static void mv88e6xxx_atu_show_header(struct seq_file *s)
2306{
2307 seq_puts(s, "DB T/P Vec State Addr\n");
2308}
2309
2310static void mv88e6xxx_atu_show_entry(struct seq_file *s, int dbnum,
2311 unsigned char *addr, int data)
2312{
2313 bool trunk = !!(data & GLOBAL_ATU_DATA_TRUNK);
2314 int portvec = ((data & GLOBAL_ATU_DATA_PORT_VECTOR_MASK) >>
2315 GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT);
2316 int state = data & GLOBAL_ATU_DATA_STATE_MASK;
2317
2318 seq_printf(s, "%03x %5s %10pb %x %pM\n",
2319 dbnum, (trunk ? "Trunk" : "Port"), &portvec, state, addr);
2320}
2321
2322static int mv88e6xxx_atu_show_db(struct seq_file *s, struct dsa_switch *ds,
2323 int dbnum)
2324{
2325 unsigned char bcast[] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
2326 unsigned char addr[6];
2327 int ret, data, state;
2328
Vivien Didelotc5723ac2015-08-10 09:09:48 -04002329 ret = _mv88e6xxx_atu_mac_write(ds, bcast);
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002330 if (ret < 0)
2331 return ret;
2332
2333 do {
Vivien Didelot70cc99d2015-09-04 14:34:10 -04002334 ret = _mv88e6xxx_reg_write(ds, REG_GLOBAL, GLOBAL_ATU_FID,
2335 dbnum);
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002336 if (ret < 0)
2337 return ret;
Vivien Didelot70cc99d2015-09-04 14:34:10 -04002338
2339 ret = _mv88e6xxx_atu_cmd(ds, GLOBAL_ATU_OP_GET_NEXT_DB);
2340 if (ret < 0)
2341 return ret;
2342
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002343 data = _mv88e6xxx_reg_read(ds, REG_GLOBAL, GLOBAL_ATU_DATA);
2344 if (data < 0)
2345 return data;
2346
2347 state = data & GLOBAL_ATU_DATA_STATE_MASK;
2348 if (state == GLOBAL_ATU_DATA_STATE_UNUSED)
2349 break;
Vivien Didelotc5723ac2015-08-10 09:09:48 -04002350 ret = _mv88e6xxx_atu_mac_read(ds, addr);
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002351 if (ret < 0)
2352 return ret;
2353 mv88e6xxx_atu_show_entry(s, dbnum, addr, data);
2354 } while (state != GLOBAL_ATU_DATA_STATE_UNUSED);
2355
2356 return 0;
2357}
2358
2359static int mv88e6xxx_atu_show(struct seq_file *s, void *p)
2360{
2361 struct dsa_switch *ds = s->private;
2362 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2363 int dbnum;
2364
2365 mv88e6xxx_atu_show_header(s);
2366
2367 for (dbnum = 0; dbnum < 255; dbnum++) {
2368 mutex_lock(&ps->smi_mutex);
2369 mv88e6xxx_atu_show_db(s, ds, dbnum);
2370 mutex_unlock(&ps->smi_mutex);
2371 }
2372
2373 return 0;
2374}
2375
2376static int mv88e6xxx_atu_open(struct inode *inode, struct file *file)
2377{
2378 return single_open(file, mv88e6xxx_atu_show, inode->i_private);
2379}
2380
2381static const struct file_operations mv88e6xxx_atu_fops = {
2382 .open = mv88e6xxx_atu_open,
2383 .read = seq_read,
2384 .llseek = no_llseek,
2385 .release = single_release,
2386 .owner = THIS_MODULE,
2387};
2388
Andrew Lunn532c7a32015-06-20 18:42:31 +02002389static void mv88e6xxx_stats_show_header(struct seq_file *s,
2390 struct mv88e6xxx_priv_state *ps)
2391{
2392 int port;
2393
2394 seq_puts(s, " Statistic ");
2395 for (port = 0 ; port < ps->num_ports; port++)
2396 seq_printf(s, "Port %2d ", port);
2397 seq_puts(s, "\n");
2398}
2399
2400static int mv88e6xxx_stats_show(struct seq_file *s, void *p)
2401{
2402 struct dsa_switch *ds = s->private;
2403 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2404 struct mv88e6xxx_hw_stat *stats = mv88e6xxx_hw_stats;
2405 int port, stat, max_stats;
2406 uint64_t value;
2407
2408 if (have_sw_in_discards(ds))
2409 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats);
2410 else
2411 max_stats = ARRAY_SIZE(mv88e6xxx_hw_stats) - 3;
2412
2413 mv88e6xxx_stats_show_header(s, ps);
2414
2415 mutex_lock(&ps->smi_mutex);
2416
2417 for (stat = 0; stat < max_stats; stat++) {
2418 seq_printf(s, "%19s: ", stats[stat].string);
2419 for (port = 0 ; port < ps->num_ports; port++) {
2420 _mv88e6xxx_stats_snapshot(ds, port);
2421 value = _mv88e6xxx_get_ethtool_stat(ds, stat, stats,
2422 port);
2423 seq_printf(s, "%8llu ", value);
2424 }
2425 seq_puts(s, "\n");
2426 }
2427 mutex_unlock(&ps->smi_mutex);
2428
2429 return 0;
2430}
2431
2432static int mv88e6xxx_stats_open(struct inode *inode, struct file *file)
2433{
2434 return single_open(file, mv88e6xxx_stats_show, inode->i_private);
2435}
2436
2437static const struct file_operations mv88e6xxx_stats_fops = {
2438 .open = mv88e6xxx_stats_open,
2439 .read = seq_read,
2440 .llseek = no_llseek,
2441 .release = single_release,
2442 .owner = THIS_MODULE,
2443};
2444
Andrew Lunnd35bd872015-06-20 18:42:32 +02002445static int mv88e6xxx_device_map_show(struct seq_file *s, void *p)
2446{
2447 struct dsa_switch *ds = s->private;
2448 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2449 int target, ret;
2450
2451 seq_puts(s, "Target Port\n");
2452
2453 mutex_lock(&ps->smi_mutex);
2454 for (target = 0; target < 32; target++) {
2455 ret = _mv88e6xxx_reg_write(
2456 ds, REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2457 target << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT);
2458 if (ret < 0)
2459 goto out;
2460 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
2461 GLOBAL2_DEVICE_MAPPING);
2462 seq_printf(s, " %2d %2d\n", target,
2463 ret & GLOBAL2_DEVICE_MAPPING_PORT_MASK);
2464 }
2465out:
2466 mutex_unlock(&ps->smi_mutex);
2467
2468 return 0;
2469}
2470
2471static int mv88e6xxx_device_map_open(struct inode *inode, struct file *file)
2472{
2473 return single_open(file, mv88e6xxx_device_map_show, inode->i_private);
2474}
2475
2476static const struct file_operations mv88e6xxx_device_map_fops = {
2477 .open = mv88e6xxx_device_map_open,
2478 .read = seq_read,
2479 .llseek = no_llseek,
2480 .release = single_release,
2481 .owner = THIS_MODULE,
2482};
2483
Andrew Lunn56d95e22015-06-20 18:42:33 +02002484static int mv88e6xxx_scratch_show(struct seq_file *s, void *p)
2485{
2486 struct dsa_switch *ds = s->private;
2487 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2488 int reg, ret;
2489
2490 seq_puts(s, "Register Value\n");
2491
2492 mutex_lock(&ps->smi_mutex);
2493 for (reg = 0; reg < 0x80; reg++) {
2494 ret = _mv88e6xxx_reg_write(
2495 ds, REG_GLOBAL2, GLOBAL2_SCRATCH_MISC,
2496 reg << GLOBAL2_SCRATCH_REGISTER_SHIFT);
2497 if (ret < 0)
2498 goto out;
2499
2500 ret = _mv88e6xxx_scratch_wait(ds);
2501 if (ret < 0)
2502 goto out;
2503
2504 ret = _mv88e6xxx_reg_read(ds, REG_GLOBAL2,
2505 GLOBAL2_SCRATCH_MISC);
2506 seq_printf(s, " %2x %2x\n", reg,
2507 ret & GLOBAL2_SCRATCH_VALUE_MASK);
2508 }
2509out:
2510 mutex_unlock(&ps->smi_mutex);
2511
2512 return 0;
2513}
2514
2515static int mv88e6xxx_scratch_open(struct inode *inode, struct file *file)
2516{
2517 return single_open(file, mv88e6xxx_scratch_show, inode->i_private);
2518}
2519
2520static const struct file_operations mv88e6xxx_scratch_fops = {
2521 .open = mv88e6xxx_scratch_open,
2522 .read = seq_read,
2523 .llseek = no_llseek,
2524 .release = single_release,
2525 .owner = THIS_MODULE,
2526};
2527
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002528int mv88e6xxx_setup_common(struct dsa_switch *ds)
2529{
2530 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Andrew Lunn87c8cef2015-06-20 18:42:28 +02002531 char *name;
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002532
2533 mutex_init(&ps->smi_mutex);
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002534
Andrew Lunncca8b132015-04-02 04:06:39 +02002535 ps->id = REG_READ(REG_PORT(0), PORT_SWITCH_ID) & 0xfff0;
Andrew Lunna8f064c2015-03-26 18:36:40 -07002536
Guenter Roeckfacd95b2015-03-26 18:36:35 -07002537 INIT_WORK(&ps->bridge_work, mv88e6xxx_bridge_work);
2538
Andrew Lunn87c8cef2015-06-20 18:42:28 +02002539 name = kasprintf(GFP_KERNEL, "dsa%d", ds->index);
2540 ps->dbgfs = debugfs_create_dir(name, NULL);
2541 kfree(name);
2542
2543 debugfs_create_file("regs", S_IRUGO, ps->dbgfs, ds,
2544 &mv88e6xxx_regs_fops);
2545
Andrew Lunn8a0a2652015-06-20 18:42:29 +02002546 debugfs_create_file("atu", S_IRUGO, ps->dbgfs, ds,
2547 &mv88e6xxx_atu_fops);
2548
Andrew Lunn532c7a32015-06-20 18:42:31 +02002549 debugfs_create_file("stats", S_IRUGO, ps->dbgfs, ds,
2550 &mv88e6xxx_stats_fops);
2551
Andrew Lunnd35bd872015-06-20 18:42:32 +02002552 debugfs_create_file("device_map", S_IRUGO, ps->dbgfs, ds,
2553 &mv88e6xxx_device_map_fops);
Andrew Lunn56d95e22015-06-20 18:42:33 +02002554
2555 debugfs_create_file("scratch", S_IRUGO, ps->dbgfs, ds,
2556 &mv88e6xxx_scratch_fops);
Guenter Roeckacdaffc2015-03-26 18:36:28 -07002557 return 0;
2558}
2559
Andrew Lunn54d792f2015-05-06 01:09:47 +02002560int mv88e6xxx_setup_global(struct dsa_switch *ds)
2561{
2562 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
Vivien Didelot24751e22015-08-03 09:17:44 -04002563 int ret;
Andrew Lunn54d792f2015-05-06 01:09:47 +02002564 int i;
2565
2566 /* Set the default address aging time to 5 minutes, and
2567 * enable address learn messages to be sent to all message
2568 * ports.
2569 */
2570 REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
2571 0x0140 | GLOBAL_ATU_CONTROL_LEARN2ALL);
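	/* 0x0140 is an AgeTime of 20 steps; at the ATU's 15 second
	 * granularity that is the 300 s (5 minute) default mentioned above.
	 */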
2572
2573 /* Configure the IP ToS mapping registers. */
2574 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_0, 0x0000);
2575 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_1, 0x0000);
2576 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_2, 0x5555);
2577 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_3, 0x5555);
2578 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_4, 0xaaaa);
2579 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_5, 0xaaaa);
2580 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_6, 0xffff);
2581 REG_WRITE(REG_GLOBAL, GLOBAL_IP_PRI_7, 0xffff);
2582
2583 /* Configure the IEEE 802.1p priority mapping register. */
2584 REG_WRITE(REG_GLOBAL, GLOBAL_IEEE_PRI, 0xfa41);
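	/* Assuming the usual two bits per 802.1p priority (priority 0 in
	 * the lowest pair), 0xfa41 maps tags 1,2 -> queue 0, 0,3 -> 1,
	 * 4,5 -> 2 and 6,7 -> 3, i.e. priority 1 is treated as lowest.
	 */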
2585
2586 /* Send all frames with destination addresses matching
2587 * 01:80:c2:00:00:0x to the CPU port.
2588 */
2589 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_0X, 0xffff);
2590
2591 /* Ignore removed tag data on doubly tagged packets, disable
2592 * flow control messages, force flow control priority to the
2593 * highest, and send all special multicast frames to the CPU
2594 * port at the highest priority.
2595 */
2596 REG_WRITE(REG_GLOBAL2, GLOBAL2_SWITCH_MGMT,
2597 0x7 | GLOBAL2_SWITCH_MGMT_RSVD2CPU | 0x70 |
2598 GLOBAL2_SWITCH_MGMT_FORCE_FLOW_CTRL_PRI);
2599
2600 /* Program the DSA routing table. */
2601 for (i = 0; i < 32; i++) {
2602 int nexthop = 0x1f;
2603
2604 if (ds->pd->rtable &&
2605 i != ds->index && i < ds->dst->pd->nr_chips)
2606 nexthop = ds->pd->rtable[i] & 0x1f;
2607
2608 REG_WRITE(REG_GLOBAL2, GLOBAL2_DEVICE_MAPPING,
2609 GLOBAL2_DEVICE_MAPPING_UPDATE |
2610 (i << GLOBAL2_DEVICE_MAPPING_TARGET_SHIFT) |
2611 nexthop);
2612 }
2613
2614 /* Clear all trunk masks. */
2615 for (i = 0; i < 8; i++)
2616 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MASK,
2617 0x8000 | (i << GLOBAL2_TRUNK_MASK_NUM_SHIFT) |
2618 ((1 << ps->num_ports) - 1));
2619
2620 /* Clear all trunk mappings. */
2621 for (i = 0; i < 16; i++)
2622 REG_WRITE(REG_GLOBAL2, GLOBAL2_TRUNK_MAPPING,
2623 GLOBAL2_TRUNK_MAPPING_UPDATE |
2624 (i << GLOBAL2_TRUNK_MAPPING_ID_SHIFT));
2625
2626 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002627 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
2628 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002629 /* Send all frames with destination addresses matching
2630 * 01:80:c2:00:00:2x to the CPU port.
2631 */
2632 REG_WRITE(REG_GLOBAL2, GLOBAL2_MGMT_EN_2X, 0xffff);
2633
2634 /* Initialise cross-chip port VLAN table to reset
2635 * defaults.
2636 */
2637 REG_WRITE(REG_GLOBAL2, GLOBAL2_PVT_ADDR, 0x9000);
2638
2639 /* Clear the priority override table. */
2640 for (i = 0; i < 16; i++)
2641 REG_WRITE(REG_GLOBAL2, GLOBAL2_PRIO_OVERRIDE,
2642 0x8000 | (i << 8));
2643 }
2644
2645 if (mv88e6xxx_6352_family(ds) || mv88e6xxx_6351_family(ds) ||
2646 mv88e6xxx_6165_family(ds) || mv88e6xxx_6097_family(ds) ||
Aleksey S. Kazantsev7c3d0d62015-07-07 20:38:15 -07002647 mv88e6xxx_6185_family(ds) || mv88e6xxx_6095_family(ds) ||
2648 mv88e6xxx_6320_family(ds)) {
Andrew Lunn54d792f2015-05-06 01:09:47 +02002649 /* Disable ingress rate limiting by resetting all
2650 * ingress rate limit registers to their initial
2651 * state.
2652 */
2653 for (i = 0; i < ps->num_ports; i++)
2654 REG_WRITE(REG_GLOBAL2, GLOBAL2_INGRESS_OP,
2655 0x9000 | (i << 8));
2656 }
2657
Andrew Lunndb687a52015-06-20 21:31:29 +02002658 /* Clear the statistics counters for all ports */
2659 REG_WRITE(REG_GLOBAL, GLOBAL_STATS_OP, GLOBAL_STATS_OP_FLUSH_ALL);
2660
2661 /* Wait for the flush to complete. */
Vivien Didelot24751e22015-08-03 09:17:44 -04002662 mutex_lock(&ps->smi_mutex);
2663 ret = _mv88e6xxx_stats_wait(ds);
Vivien Didelot6b17e862015-08-13 12:52:18 -04002664 if (ret < 0)
2665 goto unlock;
2666
Vivien Didelotc161d0a2015-09-04 14:34:13 -04002667 /* Clear all ATU entries */
2668 ret = _mv88e6xxx_atu_flush(ds, 0, true);
2669 if (ret < 0)
2670 goto unlock;
2671
Vivien Didelot6b17e862015-08-13 12:52:18 -04002672 /* Clear all the VTU and STU entries */
2673 ret = _mv88e6xxx_vtu_stu_flush(ds);
2674unlock:
Vivien Didelot24751e22015-08-03 09:17:44 -04002675 mutex_unlock(&ps->smi_mutex);
Andrew Lunndb687a52015-06-20 21:31:29 +02002676
Vivien Didelot24751e22015-08-03 09:17:44 -04002677 return ret;
Andrew Lunn54d792f2015-05-06 01:09:47 +02002678}
2679
Andrew Lunn143a8302015-04-02 04:06:34 +02002680int mv88e6xxx_switch_reset(struct dsa_switch *ds, bool ppu_active)
2681{
2682 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2683 u16 is_reset = (ppu_active ? 0x8800 : 0xc800);
2684 unsigned long timeout;
2685 int ret;
2686 int i;
2687
2688 /* Set all ports to the disabled state. */
2689 for (i = 0; i < ps->num_ports; i++) {
Andrew Lunncca8b132015-04-02 04:06:39 +02002690 ret = REG_READ(REG_PORT(i), PORT_CONTROL);
2691 REG_WRITE(REG_PORT(i), PORT_CONTROL, ret & 0xfffc);
Andrew Lunn143a8302015-04-02 04:06:34 +02002692 }
2693
2694 /* Wait for transmit queues to drain. */
2695 usleep_range(2000, 4000);
2696
2697 /* Reset the switch. Keep the PPU active if requested. The PPU
2698 * needs to be active to support indirect phy register access
2699 * through global registers 0x18 and 0x19.
2700 */
2701 if (ppu_active)
2702 REG_WRITE(REG_GLOBAL, 0x04, 0xc000);
2703 else
2704 REG_WRITE(REG_GLOBAL, 0x04, 0xc400);
2705
2706 /* Wait up to one second for reset to complete. */
2707 timeout = jiffies + 1 * HZ;
2708 while (time_before(jiffies, timeout)) {
2709 ret = REG_READ(REG_GLOBAL, 0x00);
2710 if ((ret & is_reset) == is_reset)
2711 break;
2712 usleep_range(1000, 2000);
2713 }
2714 if (time_after(jiffies, timeout))
2715 return -ETIMEDOUT;
2716
2717 return 0;
2718}
2719
Andrew Lunn491435852015-04-02 04:06:35 +02002720int mv88e6xxx_phy_page_read(struct dsa_switch *ds, int port, int page, int reg)
2721{
2722 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2723 int ret;
2724
Andrew Lunn3898c142015-05-06 01:09:53 +02002725 mutex_lock(&ps->smi_mutex);
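	/* Marvell PHYs switch register pages through register 0x16: select
	 * the page, do the access, then always restore page 0 before
	 * releasing the lock.
	 */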
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002726 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
Andrew Lunn491435852015-04-02 04:06:35 +02002727 if (ret < 0)
2728 goto error;
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002729 ret = _mv88e6xxx_phy_read_indirect(ds, port, reg);
Andrew Lunn491435852015-04-02 04:06:35 +02002730error:
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002731 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
Andrew Lunn3898c142015-05-06 01:09:53 +02002732 mutex_unlock(&ps->smi_mutex);
Andrew Lunn491435852015-04-02 04:06:35 +02002733 return ret;
2734}
2735
2736int mv88e6xxx_phy_page_write(struct dsa_switch *ds, int port, int page,
2737 int reg, int val)
2738{
2739 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2740 int ret;
2741
Andrew Lunn3898c142015-05-06 01:09:53 +02002742 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002743 ret = _mv88e6xxx_phy_write_indirect(ds, port, 0x16, page);
Andrew Lunn491435852015-04-02 04:06:35 +02002744 if (ret < 0)
2745 goto error;
2746
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002747 ret = _mv88e6xxx_phy_write_indirect(ds, port, reg, val);
Andrew Lunn491435852015-04-02 04:06:35 +02002748error:
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002749 _mv88e6xxx_phy_write_indirect(ds, port, 0x16, 0x0);
Andrew Lunn3898c142015-05-06 01:09:53 +02002750 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002751 return ret;
2752}
2753
2754static int mv88e6xxx_port_to_phy_addr(struct dsa_switch *ds, int port)
2755{
2756 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2757
2758 if (port >= 0 && port < ps->num_ports)
2759 return port;
2760 return -EINVAL;
2761}
2762
2763int
2764mv88e6xxx_phy_read(struct dsa_switch *ds, int port, int regnum)
2765{
2766 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2767 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2768 int ret;
2769
2770 if (addr < 0)
2771 return addr;
2772
Andrew Lunn3898c142015-05-06 01:09:53 +02002773 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002774 ret = _mv88e6xxx_phy_read(ds, addr, regnum);
Andrew Lunn3898c142015-05-06 01:09:53 +02002775 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002776 return ret;
2777}
2778
2779int
2780mv88e6xxx_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val)
2781{
2782 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2783 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2784 int ret;
2785
2786 if (addr < 0)
2787 return addr;
2788
Andrew Lunn3898c142015-05-06 01:09:53 +02002789 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002790 ret = _mv88e6xxx_phy_write(ds, addr, regnum, val);
Andrew Lunn3898c142015-05-06 01:09:53 +02002791 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002792 return ret;
2793}
2794
2795int
2796mv88e6xxx_phy_read_indirect(struct dsa_switch *ds, int port, int regnum)
2797{
2798 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2799 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2800 int ret;
2801
2802 if (addr < 0)
2803 return addr;
2804
Andrew Lunn3898c142015-05-06 01:09:53 +02002805 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002806 ret = _mv88e6xxx_phy_read_indirect(ds, addr, regnum);
Andrew Lunn3898c142015-05-06 01:09:53 +02002807 mutex_unlock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002808 return ret;
2809}
2810
2811int
2812mv88e6xxx_phy_write_indirect(struct dsa_switch *ds, int port, int regnum,
2813 u16 val)
2814{
2815 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2816 int addr = mv88e6xxx_port_to_phy_addr(ds, port);
2817 int ret;
2818
2819 if (addr < 0)
2820 return addr;
2821
Andrew Lunn3898c142015-05-06 01:09:53 +02002822 mutex_lock(&ps->smi_mutex);
Andrew Lunnfd3a0ee2015-04-02 04:06:36 +02002823 ret = _mv88e6xxx_phy_write_indirect(ds, addr, regnum, val);
Andrew Lunn3898c142015-05-06 01:09:53 +02002824 mutex_unlock(&ps->smi_mutex);
Andrew Lunn491435852015-04-02 04:06:35 +02002825 return ret;
2826}
2827
Guenter Roeckc22995c2015-07-25 09:42:28 -07002828#ifdef CONFIG_NET_DSA_HWMON
2829
2830static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
2831{
2832 struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
2833 int ret;
2834 int val;
2835
2836 *temp = 0;
2837
2838 mutex_lock(&ps->smi_mutex);
2839
2840 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x6);
2841 if (ret < 0)
2842 goto error;
2843
2844 /* Enable temperature sensor */
2845 ret = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2846 if (ret < 0)
2847 goto error;
2848
2849 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, ret | (1 << 5));
2850 if (ret < 0)
2851 goto error;
2852
2853 /* Wait for temperature to stabilize */
2854 usleep_range(10000, 12000);
2855
2856 val = _mv88e6xxx_phy_read(ds, 0x0, 0x1a);
2857 if (val < 0) {
2858 ret = val;
2859 goto error;
2860 }
2861
2862 /* Disable temperature sensor */
2863 ret = _mv88e6xxx_phy_write(ds, 0x0, 0x1a, val & ~(1 << 5));
2864 if (ret < 0)
2865 goto error;
2866
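	/* The 5-bit reading counts 5 degree steps with raw 5 == 0 C, so a
	 * raw value of 11, for example, reports 30 C.
	 */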
2867 *temp = ((val & 0x1f) - 5) * 5;
2868
2869error:
2870 _mv88e6xxx_phy_write(ds, 0x0, 0x16, 0x0);
2871 mutex_unlock(&ps->smi_mutex);
2872 return ret;
2873}
2874
2875static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
2876{
2877 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2878 int ret;
2879
2880 *temp = 0;
2881
2882 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 27);
2883 if (ret < 0)
2884 return ret;
2885
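	/* This sensor reports whole degrees with a 25 C offset: a raw
	 * value of 70 means 45 C.
	 */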
2886 *temp = (ret & 0xff) - 25;
2887
2888 return 0;
2889}
2890
2891int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
2892{
2893 if (mv88e6xxx_6320_family(ds) || mv88e6xxx_6352_family(ds))
2894 return mv88e63xx_get_temp(ds, temp);
2895
2896 return mv88e61xx_get_temp(ds, temp);
2897}
2898
2899int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
2900{
2901 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2902 int ret;
2903
2904 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2905 return -EOPNOTSUPP;
2906
2907 *temp = 0;
2908
2909 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2910 if (ret < 0)
2911 return ret;
2912
2913 *temp = (((ret >> 8) & 0x1f) * 5) - 25;
2914
2915 return 0;
2916}
2917
2918int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
2919{
2920 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2921 int ret;
2922
2923 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2924 return -EOPNOTSUPP;
2925
2926 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2927 if (ret < 0)
2928 return ret;
2929 temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
2930 return mv88e6xxx_phy_page_write(ds, phy, 6, 26,
2931 (ret & 0xe0ff) | (temp << 8));
2932}
2933
2934int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
2935{
2936 int phy = mv88e6xxx_6320_family(ds) ? 3 : 0;
2937 int ret;
2938
2939 if (!mv88e6xxx_6320_family(ds) && !mv88e6xxx_6352_family(ds))
2940 return -EOPNOTSUPP;
2941
2942 *alarm = false;
2943
2944 ret = mv88e6xxx_phy_page_read(ds, phy, 6, 26);
2945 if (ret < 0)
2946 return ret;
2947
2948 *alarm = !!(ret & 0x40);
2949
2950 return 0;
2951}
2952#endif /* CONFIG_NET_DSA_HWMON */
2953
Ben Hutchings98e67302011-11-25 14:36:19 +00002954static int __init mv88e6xxx_init(void)
2955{
2956#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2957 register_switch_driver(&mv88e6131_switch_driver);
2958#endif
2959#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2960 register_switch_driver(&mv88e6123_61_65_switch_driver);
2961#endif
Guenter Roeck3ad50cc2014-10-29 10:44:56 -07002962#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2963 register_switch_driver(&mv88e6352_switch_driver);
2964#endif
Andrew Lunn42f27252014-09-12 23:58:44 +02002965#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2966 register_switch_driver(&mv88e6171_switch_driver);
2967#endif
Ben Hutchings98e67302011-11-25 14:36:19 +00002968 return 0;
2969}
2970module_init(mv88e6xxx_init);
2971
2972static void __exit mv88e6xxx_cleanup(void)
2973{
Andrew Lunn42f27252014-09-12 23:58:44 +02002974#if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
2975 unregister_switch_driver(&mv88e6171_switch_driver);
2976#endif
Vivien Didelot4212b542015-05-01 10:43:52 -04002977#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
2978 unregister_switch_driver(&mv88e6352_switch_driver);
2979#endif
Ben Hutchings98e67302011-11-25 14:36:19 +00002980#if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
2981 unregister_switch_driver(&mv88e6123_61_65_switch_driver);
2982#endif
2983#if IS_ENABLED(CONFIG_NET_DSA_MV88E6131)
2984 unregister_switch_driver(&mv88e6131_switch_driver);
2985#endif
2986}
2987module_exit(mv88e6xxx_cleanup);
Ben Hutchings3d825ed2011-11-25 14:37:16 +00002988
2989MODULE_AUTHOR("Lennert Buytenhek <buytenh@wantstofly.org>");
2990MODULE_DESCRIPTION("Driver for Marvell 88E6XXX ethernet switch chips");
2991MODULE_LICENSE("GPL");