blob: 0818a67a7b2d41229e55f8488b0fd78f2ce85f06 [file] [log] [blame]
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/delay.h>
9#include <linux/module.h>
10#include <linux/printk.h>
11#include <linux/spi/spi.h>
12#include <linux/errno.h>
13#include <linux/gpio/consumer.h>
Vladimir Olteanad9f2992019-05-02 23:23:38 +030014#include <linux/phylink.h>
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +030015#include <linux/of.h>
16#include <linux/of_net.h>
17#include <linux/of_mdio.h>
18#include <linux/of_device.h>
19#include <linux/netdev_features.h>
20#include <linux/netdevice.h>
21#include <linux/if_bridge.h>
22#include <linux/if_ether.h>
Vladimir Oltean227d07a2019-05-05 13:19:27 +030023#include <linux/dsa/8021q.h>
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +030024#include "sja1105.h"
Vladimir Olteanffe10e62020-03-20 13:29:37 +020025#include "sja1105_sgmii.h"
Vladimir Oltean317ab5b2019-09-15 05:00:02 +030026#include "sja1105_tas.h"
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +030027
Vladimir Oltean4d942352021-02-12 17:16:00 +020028#define SJA1105_UNKNOWN_MULTICAST 0x010000000000ull
29
Vladimir Olteanac02a452020-05-10 19:37:43 +030030static const struct dsa_switch_ops sja1105_switch_ops;
31
/* Drive the switch's hardware reset line through a GPIO.
 *
 * @gpio:          reset GPIO descriptor (asserted as logical 1, deasserted
 *                 as 0; polarity inversion is handled by gpiolib)
 * @pulse_len:     time in ms to keep the line asserted (chip's minimum
 *                 reset pulse width)
 * @startup_delay: time in ms to wait after deassertion before the chip is
 *                 usable again
 *
 * Uses the _cansleep GPIO accessors, so this must be called from process
 * context (msleep also requires that).
 */
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	gpiod_set_value_cansleep(gpio, 1);
	/* Wait for minimum reset pulse length */
	msleep(pulse_len);
	gpiod_set_value_cansleep(gpio, 0);
	/* Wait until chip is ready after reset */
	msleep(startup_delay);
}
42
43static void
44sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
45 int from, int to, bool allow)
46{
Vladimir Oltean4d942352021-02-12 17:16:00 +020047 if (allow)
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +030048 l2_fwd[from].reach_port |= BIT(to);
Vladimir Oltean4d942352021-02-12 17:16:00 +020049 else
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +030050 l2_fwd[from].reach_port &= ~BIT(to);
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +030051}
52
/* Structure used to temporarily transport device tree
 * settings into sja1105_setup
 */
struct sja1105_dt_port {
	phy_interface_t phy_mode;	/* value of the port's "phy-mode" DT property */
	sja1105_mii_role_t role;	/* XMII_MAC or XMII_PHY, from phy-handle /
					 * fixed-link / sja1105,role-* bindings
					 */
};
60
/* Build the MAC Configuration Table: one entry per port, all starting from
 * the same conservative template below. User ports are left with learning
 * and I/O disabled (STP will enable them at runtime); only the upstream
 * (CPU-facing) port is enabled statically, since STP is never called for it.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on egress.
		 * Every queue i holds top[i] - base[i] frames.
		 * Sum of top[i] - base[i] is 511 (max hardware limit).
		 */
		.top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep standard IFG of 12 bytes on egress. */
		.ifg = 0,
		/* Always put the MAC speed in automatic mode, where it can be
		 * adjusted at runtime by PHYLINK.
		 */
		.speed = SJA1105_SPEED_AUTO,
		/* No static correction for 1-step 1588 events */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Disable aging for critical TTEthernet traffic */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) to apply to untagged ingress */
		.vlanprio = 0,
		.vlanid = 1,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop traffic with other EtherType than ETH_P_IP */
		.drpnona664 = false,
		/* Don't drop double-tagged traffic */
		.drpdtag = false,
		/* Don't drop untagged traffic */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard previous MAC Configuration Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_NUM_PORTS,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_NUM_PORTS;

	mac = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP doesn't get called for CPU port, so we need to
			 * set the I/O parameters statically.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}
137
Vladimir Olteanffe10e62020-03-20 13:29:37 +0200138static bool sja1105_supports_sgmii(struct sja1105_private *priv, int port)
139{
140 if (priv->info->part_no != SJA1105R_PART_NO &&
141 priv->info->part_no != SJA1105S_PART_NO)
142 return false;
143
144 if (port != SJA1105_SGMII_PORT)
145 return false;
146
147 if (dsa_is_unused_port(priv->ds, port))
148 return false;
149
150 return true;
151}
152
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300153static int sja1105_init_mii_settings(struct sja1105_private *priv,
154 struct sja1105_dt_port *ports)
155{
156 struct device *dev = &priv->spidev->dev;
157 struct sja1105_xmii_params_entry *mii;
158 struct sja1105_table *table;
159 int i;
160
161 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
162
163 /* Discard previous xMII Mode Parameters Table */
164 if (table->entry_count) {
165 kfree(table->entries);
166 table->entry_count = 0;
167 }
168
169 table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
170 table->ops->unpacked_entry_size, GFP_KERNEL);
171 if (!table->entries)
172 return -ENOMEM;
173
Vladimir Oltean1fd4a172019-06-08 16:03:42 +0300174 /* Override table based on PHYLINK DT bindings */
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300175 table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
176
177 mii = table->entries;
178
179 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
Vladimir Olteanee9d0cb2020-03-19 22:12:10 +0200180 if (dsa_is_unused_port(priv->ds, i))
181 continue;
182
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300183 switch (ports[i].phy_mode) {
184 case PHY_INTERFACE_MODE_MII:
185 mii->xmii_mode[i] = XMII_MODE_MII;
186 break;
187 case PHY_INTERFACE_MODE_RMII:
188 mii->xmii_mode[i] = XMII_MODE_RMII;
189 break;
190 case PHY_INTERFACE_MODE_RGMII:
191 case PHY_INTERFACE_MODE_RGMII_ID:
192 case PHY_INTERFACE_MODE_RGMII_RXID:
193 case PHY_INTERFACE_MODE_RGMII_TXID:
194 mii->xmii_mode[i] = XMII_MODE_RGMII;
195 break;
Vladimir Olteanffe10e62020-03-20 13:29:37 +0200196 case PHY_INTERFACE_MODE_SGMII:
197 if (!sja1105_supports_sgmii(priv, i))
198 return -EINVAL;
199 mii->xmii_mode[i] = XMII_MODE_SGMII;
200 break;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300201 default:
202 dev_err(dev, "Unsupported PHY mode %s!\n",
203 phy_modes(ports[i].phy_mode));
204 }
205
Vladimir Olteanffe10e62020-03-20 13:29:37 +0200206 /* Even though the SerDes port is able to drive SGMII autoneg
207 * like a PHY would, from the perspective of the XMII tables,
208 * the SGMII port should always be put in MAC mode.
209 */
210 if (ports[i].phy_mode == PHY_INTERFACE_MODE_SGMII)
211 mii->phy_mac[i] = XMII_MAC;
212 else
213 mii->phy_mac[i] = ports[i].role;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300214 }
215 return 0;
216}
217
/* Initialize the static L2 Lookup (FDB) table. The FDB proper is populated
 * only via dynamic entries at runtime; the single static entry installed
 * here (on chips that support it) is a catch-all for unknown multicast,
 * used to control the multicast flooding domain.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int sja1105_init_static_fdb(struct sja1105_private *priv)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int port;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	/* We only populate the FDB table through dynamic L2 Address Lookup
	 * entries, except for a special entry at the end which is a catch-all
	 * for unknown multicast and will be used to control flooding domain.
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* Chips that cannot limit multicast flooding get no static entry */
	if (!priv->info->can_limit_mcast_flood)
		return 0;

	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;
	l2_lookup = table->entries;

	/* All L2 multicast addresses have an odd first octet */
	l2_lookup[0].macaddr = SJA1105_UNKNOWN_MULTICAST;
	l2_lookup[0].mask_macaddr = SJA1105_UNKNOWN_MULTICAST;
	l2_lookup[0].lockeds = true;
	/* Place the entry at the last index so it cannot shadow the
	 * dynamically learned/added entries before it.
	 */
	l2_lookup[0].index = SJA1105_MAX_L2_LOOKUP_COUNT - 1;

	/* Flood multicast to every port by default */
	for (port = 0; port < priv->ds->num_ports; port++)
		if (!dsa_is_unused_port(priv->ds, port))
			l2_lookup[0].destports |= BIT(port);

	return 0;
}
259
/* Initialize the L2 Lookup Parameters table (single entry), which controls
 * address-learning behavior: ageing time, per-port FDB budget, hash
 * polynomial, and shared vs independent VLAN learning.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	/* Partition the FDB entries evenly between the ports */
	u64 max_fdb_entries = SJA1105_MAX_L2_LOOKUP_COUNT / SJA1105_NUM_PORTS;
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* And the P/Q/R/S equivalent setting: */
		.start_dynspc = 0,
		.maxaddrp = {max_fdb_entries, max_fdb_entries, max_fdb_entries,
			     max_fdb_entries, max_fdb_entries, },
		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
		.poly = 0x97,
		/* This selects between Independent VLAN Learning (IVL) and
		 * Shared VLAN Learning (SVL)
		 */
		.shared_learn = true,
		/* Don't discard management traffic based on ENFPORT -
		 * we don't perform SMAC port enforcement anyway, so
		 * what we are setting here doesn't matter.
		 */
		.no_enf_hostprt = false,
		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
		 * Maybe correlate with no_linklocal_learn from bridge driver?
		 */
		.no_mgmt_learn = true,
		/* P/Q/R/S only */
		.use_static = true,
		/* Dynamically learned FDB entries can overwrite other (older)
		 * dynamic FDB entries
		 */
		.owr_dyn = true,
		.drpnolearn = true,
	};

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	/* Discard previous L2 Lookup Parameters table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}
317
/* Install the initial pvid (VLAN 1) in the static VLAN Lookup table, with
 * all DT-enabled ports as untagged members, and mirror that state into the
 * driver's dsa_8021q VLAN bookkeeping list.
 *
 * Returns 0 on success or -ENOMEM on allocation failure (entries already
 * added to priv->dsa_8021q_vlans remain on the list for later cleanup).
 */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_vlan_lookup_entry pvid = {
		.ving_mirr = 0,
		.vegr_mirr = 0,
		.vmemb_port = 0,
		.vlan_bc = 0,
		.tag_port = 0,
		.vlanid = 1,
	};
	struct dsa_switch *ds = priv->ds;
	int port;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	/* The static VLAN table will only contain the initial pvid of 1.
	 * All other VLANs are to be configured through dynamic entries,
	 * and kept in the static configuration table as backing memory.
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	/* kzalloc rather than kcalloc: this table holds exactly one entry */
	table->entries = kzalloc(table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;

	/* VLAN 1: all DT-defined ports are members; no restrictions on
	 * forwarding; always transmit as untagged.
	 */
	for (port = 0; port < ds->num_ports; port++) {
		struct sja1105_bridge_vlan *v;

		if (dsa_is_unused_port(ds, port))
			continue;

		pvid.vmemb_port |= BIT(port);
		pvid.vlan_bc |= BIT(port);
		pvid.tag_port &= ~BIT(port);

		/* Let traffic that don't need dsa_8021q (e.g. STP, PTP) be
		 * transmitted as untagged.
		 */
		v = kzalloc(sizeof(*v), GFP_KERNEL);
		if (!v)
			return -ENOMEM;

		v->port = port;
		v->vid = 1;
		v->untagged = true;
		if (dsa_is_cpu_port(ds, port))
			v->pvid = true;
		list_add(&v->list, &priv->dsa_8021q_vlans);
	}

	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
	return 0;
}
381
/* Build the L2 Forwarding table. The first SJA1105_NUM_PORTS entries define
 * per-port reachability, broadcast and flooding domains - initially a star
 * topology where each user port may only exchange traffic with the upstream
 * (CPU) port. The following SJA1105_NUM_TC entries define a one-to-one
 * ingress-to-egress VLAN PCP mapping.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2fwd;
	struct sja1105_table *table;
	int i, j;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;

	l2fwd = table->entries;

	/* First 5 entries define the forwarding rules */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		unsigned int upstream = dsa_upstream_port(priv->ds, i);

		for (j = 0; j < SJA1105_NUM_TC; j++)
			l2fwd[i].vlan_pmap[j] = j;

		/* The upstream port is its own upstream; skip it so it does
		 * not end up in its own reachability/flooding masks.
		 */
		if (i == upstream)
			continue;

		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
		sja1105_port_allow_traffic(l2fwd, upstream, i, true);

		l2fwd[i].bc_domain = BIT(upstream);
		l2fwd[i].fl_domain = BIT(upstream);

		l2fwd[upstream].bc_domain |= BIT(i);
		l2fwd[upstream].fl_domain |= BIT(i);
	}
	/* Next 8 entries define VLAN PCP mapping from ingress to egress.
	 * Create a one-to-one mapping.
	 */
	for (i = 0; i < SJA1105_NUM_TC; i++)
		for (j = 0; j < SJA1105_NUM_PORTS; j++)
			l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;

	return 0;
}
432
433static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
434{
435 struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
436 /* Disallow dynamic reconfiguration of vlan_pmap */
437 .max_dynp = 0,
438 /* Use a single memory partition for all ingress queues */
439 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
440 };
441 struct sja1105_table *table;
442
443 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
444
445 if (table->entry_count) {
446 kfree(table->entries);
447 table->entry_count = 0;
448 }
449
450 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
451 table->ops->unpacked_entry_size, GFP_KERNEL);
452 if (!table->entries)
453 return -ENOMEM;
454
455 table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
456
457 /* This table only has a single entry */
458 ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
459 default_l2fwd_params;
460
461 return 0;
462}
463
/* Repartition the switch's frame buffer memory between best-effort traffic
 * and (when a VL Forwarding table exists) time-triggered virtual-link
 * traffic. Mutates the already-built static config tables in place; callers
 * are responsible for pushing the change to hardware. Non-static because it
 * is also called from outside this file.
 */
void sja1105_frame_memory_partitioning(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_params_entry *l2_fwd_params;
	struct sja1105_vl_forwarding_params_entry *vl_fwd_params;
	struct sja1105_table *table;
	int max_mem;

	/* VLAN retagging is implemented using a loopback port that consumes
	 * frame buffers. That leaves less for us.
	 */
	if (priv->vlan_state == SJA1105_VLAN_BEST_EFFORT)
		max_mem = SJA1105_MAX_FRAME_MEMORY_RETAGGING;
	else
		max_mem = SJA1105_MAX_FRAME_MEMORY;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
	l2_fwd_params = table->entries;
	l2_fwd_params->part_spc[0] = max_mem;

	/* If we have any critical-traffic virtual links, we need to reserve
	 * some frame buffer memory for them. At the moment, hardcode the value
	 * at 100 blocks of 128 bytes of memory each. This leaves 829 blocks
	 * remaining for best-effort traffic. TODO: figure out a more flexible
	 * way to perform the frame buffer partitioning.
	 */
	if (!priv->static_config.tables[BLK_IDX_VL_FORWARDING].entry_count)
		return;

	table = &priv->static_config.tables[BLK_IDX_VL_FORWARDING_PARAMS];
	vl_fwd_params = table->entries;

	l2_fwd_params->part_spc[0] -= SJA1105_VL_FRAME_MEMORY;
	vl_fwd_params->partspc[0] = SJA1105_VL_FRAME_MEMORY;
}
498
/* Initialize the General Parameters table (single entry): link-local trap
 * filters, host/cascade port designation, mirroring defaults, and the
 * EtherTypes the switch treats as VLAN tags (set to a bogus value here so
 * that VLAN filtering starts out disabled).
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Allow dynamic changing of the mirror port */
		.mirr_ptacu = true,
		.switchid = priv->ds->index,
		/* Priority queue for link-local management frames
		 * (both ingress to and egress from CPU - PTP, STP etc)
		 */
		.hostprio = 7,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = false,
		.send_meta1 = false,
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = false,
		.send_meta0 = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port. Such traffic
		 * received on host_port itself would be dropped, except
		 * by installing a temporary 'management route'
		 */
		.host_port = dsa_upstream_port(priv->ds, 0),
		/* Default to an invalid value */
		.mirr_port = SJA1105_NUM_PORTS,
		/* Link-local traffic received on casc_port will be forwarded
		 * to host_port without embedding the source port and device ID
		 * info in the destination MAC address (presumably because it
		 * is a cascaded port and a downstream SJA switch already did
		 * that). Default to an invalid port (to disable the feature)
		 * and overwrite this if we find any DSA (cascaded) ports.
		 */
		.casc_port = SJA1105_NUM_PORTS,
		/* No TTEthernet */
		.vllupformat = SJA1105_VL_FORMAT_PSFP,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Forcefully disable VLAN filtering by telling
		 * the switch that VLAN has a different EtherType.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}
567
/* Initialize the AVB Parameters table (single entry): the MAC addresses
 * used for PTP meta frames, and the PTP_CLK pin direction.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int sja1105_init_avb_params(struct sja1105_private *priv)
{
	struct sja1105_avb_params_entry *avb;
	struct sja1105_table *table;

	table = &priv->static_config.tables[BLK_IDX_AVB_PARAMS];

	/* Discard previous AVB Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_AVB_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_AVB_PARAMS_COUNT;

	avb = table->entries;

	/* Configure the MAC addresses for meta frames */
	avb->destmeta = SJA1105_META_DMAC;
	avb->srcmeta = SJA1105_META_SMAC;
	/* On P/Q/R/S, configure the direction of the PTP_CLK pin as input by
	 * default. This is because there might be boards with a hardware
	 * layout where enabling the pin as output might cause an electrical
	 * clash. On E/T the pin is always an output, which the board designers
	 * probably already knew, so even if there are going to be electrical
	 * issues, there's nothing we can do.
	 */
	avb->cas_master = false;

	return 0;
}
604
Vladimir Olteana7cc0812020-03-29 14:52:01 +0300605/* The L2 policing table is 2-stage. The table is looked up for each frame
606 * according to the ingress port, whether it was broadcast or not, and the
607 * classified traffic class (given by VLAN PCP). This portion of the lookup is
608 * fixed, and gives access to the SHARINDX, an indirection register pointing
609 * within the policing table itself, which is used to resolve the policer that
610 * will be used for this frame.
611 *
612 * Stage 1 Stage 2
613 * +------------+--------+ +---------------------------------+
614 * |Port 0 TC 0 |SHARINDX| | Policer 0: Rate, Burst, MTU |
615 * +------------+--------+ +---------------------------------+
616 * |Port 0 TC 1 |SHARINDX| | Policer 1: Rate, Burst, MTU |
617 * +------------+--------+ +---------------------------------+
618 * ... | Policer 2: Rate, Burst, MTU |
619 * +------------+--------+ +---------------------------------+
620 * |Port 0 TC 7 |SHARINDX| | Policer 3: Rate, Burst, MTU |
621 * +------------+--------+ +---------------------------------+
622 * |Port 1 TC 0 |SHARINDX| | Policer 4: Rate, Burst, MTU |
623 * +------------+--------+ +---------------------------------+
624 * ... | Policer 5: Rate, Burst, MTU |
625 * +------------+--------+ +---------------------------------+
626 * |Port 1 TC 7 |SHARINDX| | Policer 6: Rate, Burst, MTU |
627 * +------------+--------+ +---------------------------------+
628 * ... | Policer 7: Rate, Burst, MTU |
629 * +------------+--------+ +---------------------------------+
630 * |Port 4 TC 7 |SHARINDX| ...
631 * +------------+--------+
632 * |Port 0 BCAST|SHARINDX| ...
633 * +------------+--------+
634 * |Port 1 BCAST|SHARINDX| ...
635 * +------------+--------+
636 * ... ...
637 * +------------+--------+ +---------------------------------+
638 * |Port 4 BCAST|SHARINDX| | Policer 44: Rate, Burst, MTU |
639 * +------------+--------+ +---------------------------------+
640 *
 * In this driver, we shall use policers 0-4 as statically allocated port
642 * (matchall) policers. So we need to make the SHARINDX for all lookups
643 * corresponding to this ingress port (8 VLAN PCP lookups and 1 broadcast
644 * lookup) equal.
645 * The remaining policers (40) shall be dynamically allocated for flower
646 * policers, where the key is either vlan_prio or dst_mac ff:ff:ff:ff:ff:ff.
647 */
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300648#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
649
/* Initialize the two-stage L2 Policing table (see the diagram above): point
 * all 9 stage-1 lookups of each port (8 traffic classes plus broadcast) at a
 * single per-port "matchall" policer, then configure those policers to be
 * effectively transparent (line rate, maximum burst, port MTU).
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_table *table;
	int port, tc;

	table = &priv->static_config.tables[BLK_IDX_L2_POLICING];

	/* Discard previous L2 Policing Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;

	policing = table->entries;

	/* Setup shared indices for the matchall policers */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		/* Broadcast lookups start after the per-TC lookups */
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + port;

		for (tc = 0; tc < SJA1105_NUM_TC; tc++)
			policing[port * SJA1105_NUM_TC + tc].sharindx = port;

		policing[bcast].sharindx = port;
	}

	/* Setup the matchall policer parameters */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		int mtu = VLAN_ETH_FRAME_LEN + ETH_FCS_LEN;

		/* The CPU port carries an extra DSA tag (VLAN-sized) */
		if (dsa_is_cpu_port(priv->ds, port))
			mtu += VLAN_HLEN;

		policing[port].smax = 65535; /* Burst size in bytes */
		policing[port].rate = SJA1105_RATE_MBPS(1000);
		policing[port].maxlen = mtu;
		policing[port].partition = 0;
	}

	return 0;
}
698
699static int sja1105_static_config_load(struct sja1105_private *priv,
700 struct sja1105_dt_port *ports)
701{
702 int rc;
703
704 sja1105_static_config_free(&priv->static_config);
705 rc = sja1105_static_config_init(&priv->static_config,
706 priv->info->static_ops,
707 priv->info->device_id);
708 if (rc)
709 return rc;
710
711 /* Build static configuration */
712 rc = sja1105_init_mac_settings(priv);
713 if (rc < 0)
714 return rc;
715 rc = sja1105_init_mii_settings(priv, ports);
716 if (rc < 0)
717 return rc;
718 rc = sja1105_init_static_fdb(priv);
719 if (rc < 0)
720 return rc;
721 rc = sja1105_init_static_vlan(priv);
722 if (rc < 0)
723 return rc;
724 rc = sja1105_init_l2_lookup_params(priv);
725 if (rc < 0)
726 return rc;
727 rc = sja1105_init_l2_forwarding(priv);
728 if (rc < 0)
729 return rc;
730 rc = sja1105_init_l2_forwarding_params(priv);
731 if (rc < 0)
732 return rc;
733 rc = sja1105_init_l2_policing(priv);
734 if (rc < 0)
735 return rc;
736 rc = sja1105_init_general_params(priv);
737 if (rc < 0)
738 return rc;
Vladimir Oltean79d55112020-03-24 00:59:21 +0200739 rc = sja1105_init_avb_params(priv);
740 if (rc < 0)
741 return rc;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300742
743 /* Send initial configuration to hardware via SPI */
744 return sja1105_static_config_upload(priv);
745}
746
/* Record which ports need internal RGMII delays, based on the device tree.
 *
 * Delays are only applied when the chip is the side of the link that must
 * provide them (role != XMII_MAC is skipped), and only if this chip
 * revision has a setup_rgmii_delay hook.
 *
 * Returns 0 on success, or -EINVAL if a delay is requested that this chip
 * cannot provide.
 */
static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
				      const struct sja1105_dt_port *ports)
{
	int i;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		if (ports[i].role == XMII_MAC)
			continue;

		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
			priv->rgmii_rx_delay[i] = true;

		if (ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
		    ports[i].phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
			priv->rgmii_tx_delay[i] = true;

		if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
		     !priv->info->setup_rgmii_delay)
			return -EINVAL;
	}
	return 0;
}
770
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300771static int sja1105_parse_ports_node(struct sja1105_private *priv,
772 struct sja1105_dt_port *ports,
773 struct device_node *ports_node)
774{
775 struct device *dev = &priv->spidev->dev;
776 struct device_node *child;
777
Vladimir Oltean27afe0d2020-01-16 20:43:27 +0200778 for_each_available_child_of_node(ports_node, child) {
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300779 struct device_node *phy_node;
Andrew Lunn0c65b2b2019-11-04 02:40:33 +0100780 phy_interface_t phy_mode;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300781 u32 index;
Andrew Lunn0c65b2b2019-11-04 02:40:33 +0100782 int err;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300783
784 /* Get switch port number from DT */
785 if (of_property_read_u32(child, "reg", &index) < 0) {
786 dev_err(dev, "Port number not defined in device tree "
787 "(property \"reg\")\n");
Nishka Dasgupta7ba771e2019-07-23 16:14:48 +0530788 of_node_put(child);
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300789 return -ENODEV;
790 }
791
792 /* Get PHY mode from DT */
Andrew Lunn0c65b2b2019-11-04 02:40:33 +0100793 err = of_get_phy_mode(child, &phy_mode);
794 if (err) {
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300795 dev_err(dev, "Failed to read phy-mode or "
796 "phy-interface-type property for port %d\n",
797 index);
Nishka Dasgupta7ba771e2019-07-23 16:14:48 +0530798 of_node_put(child);
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300799 return -ENODEV;
800 }
801 ports[index].phy_mode = phy_mode;
802
803 phy_node = of_parse_phandle(child, "phy-handle", 0);
804 if (!phy_node) {
805 if (!of_phy_is_fixed_link(child)) {
806 dev_err(dev, "phy-handle or fixed-link "
807 "properties missing!\n");
Nishka Dasgupta7ba771e2019-07-23 16:14:48 +0530808 of_node_put(child);
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300809 return -ENODEV;
810 }
811 /* phy-handle is missing, but fixed-link isn't.
812 * So it's a fixed link. Default to PHY role.
813 */
814 ports[index].role = XMII_PHY;
815 } else {
816 /* phy-handle present => put port in MAC role */
817 ports[index].role = XMII_MAC;
818 of_node_put(phy_node);
819 }
820
821 /* The MAC/PHY role can be overridden with explicit bindings */
822 if (of_property_read_bool(child, "sja1105,role-mac"))
823 ports[index].role = XMII_MAC;
824 else if (of_property_read_bool(child, "sja1105,role-phy"))
825 ports[index].role = XMII_PHY;
826 }
827
828 return 0;
829}
830
831static int sja1105_parse_dt(struct sja1105_private *priv,
832 struct sja1105_dt_port *ports)
833{
834 struct device *dev = &priv->spidev->dev;
835 struct device_node *switch_node = dev->of_node;
836 struct device_node *ports_node;
837 int rc;
838
839 ports_node = of_get_child_by_name(switch_node, "ports");
840 if (!ports_node) {
841 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
842 return -ENODEV;
843 }
844
845 rc = sja1105_parse_ports_node(priv, ports, ports_node);
846 of_node_put(ports_node);
847
848 return rc;
849}
850
Vladimir Olteanffe10e62020-03-20 13:29:37 +0200851static int sja1105_sgmii_read(struct sja1105_private *priv, int pcs_reg)
852{
853 const struct sja1105_regs *regs = priv->info->regs;
854 u32 val;
855 int rc;
856
857 rc = sja1105_xfer_u32(priv, SPI_READ, regs->sgmii + pcs_reg, &val,
858 NULL);
859 if (rc < 0)
860 return rc;
861
862 return val;
863}
864
865static int sja1105_sgmii_write(struct sja1105_private *priv, int pcs_reg,
866 u16 pcs_val)
867{
868 const struct sja1105_regs *regs = priv->info->regs;
869 u32 val = pcs_val;
870 int rc;
871
872 rc = sja1105_xfer_u32(priv, SPI_WRITE, regs->sgmii + pcs_reg, &val,
873 NULL);
874 if (rc < 0)
875 return rc;
876
877 return val;
878}
879
/* Configure the SGMII PCS: vendor MMD setup, lane polarity, SGMII autoneg
 * mode and, if @an_enabled, kick off in-band autoneg. Otherwise
 * sja1105_sgmii_pcs_force_speed() must be called later for the link to
 * become operational. @an_master selects the PHY-side (link master) role.
 */
static void sja1105_sgmii_pcs_config(struct sja1105_private *priv,
				     bool an_enabled, bool an_master)
{
	u16 ac = SJA1105_AC_AUTONEG_MODE_SGMII;

	/* DIGITAL_CONTROL_1: Enable vendor-specific MMD1, allow the PHY to
	 * stop the clock during LPI mode, make the MAC reconfigure
	 * autonomously after PCS autoneg is done, flush the internal FIFOs.
	 */
	sja1105_sgmii_write(priv, SJA1105_DC1, SJA1105_DC1_EN_VSMMD1 |
			    SJA1105_DC1_CLOCK_STOP_EN |
			    SJA1105_DC1_MAC_AUTO_SW |
			    SJA1105_DC1_INIT);
	/* DIGITAL_CONTROL_2: No polarity inversion for TX and RX lanes */
	sja1105_sgmii_write(priv, SJA1105_DC2, SJA1105_DC2_TX_POL_INV_DISABLE);
	/* AUTONEG_CONTROL: Use SGMII autoneg */
	if (an_master)
		ac |= SJA1105_AC_PHY_MODE | SJA1105_AC_SGMII_LINK;
	sja1105_sgmii_write(priv, SJA1105_AC, ac);
	/* BASIC_CONTROL: enable in-band AN now, if requested. Otherwise,
	 * sja1105_sgmii_pcs_force_speed must be called later for the link
	 * to become operational.
	 */
	if (an_enabled)
		sja1105_sgmii_write(priv, MII_BMCR,
				    BMCR_ANENABLE | BMCR_ANRESTART);
}
907
908static void sja1105_sgmii_pcs_force_speed(struct sja1105_private *priv,
909 int speed)
910{
911 int pcs_speed;
912
913 switch (speed) {
914 case SPEED_1000:
915 pcs_speed = BMCR_SPEED1000;
916 break;
917 case SPEED_100:
918 pcs_speed = BMCR_SPEED100;
919 break;
920 case SPEED_10:
921 pcs_speed = BMCR_SPEED10;
922 break;
923 default:
924 dev_err(priv->ds->dev, "Invalid speed %d\n", speed);
925 return;
926 }
927 sja1105_sgmii_write(priv, MII_BMCR, pcs_speed | BMCR_FULLDPLX);
928}
929
/* Convert link speed from SJA1105 to ethtool encoding.
 * Indexed by sja1105_speed_t as used in the MAC Configuration table.
 */
static int sja1105_speed[] = {
	[SJA1105_SPEED_AUTO]		= SPEED_UNKNOWN,
	[SJA1105_SPEED_10MBPS]		= SPEED_10,
	[SJA1105_SPEED_100MBPS]		= SPEED_100,
	[SJA1105_SPEED_1000MBPS]	= SPEED_1000,
};
937
/* Set link speed in the MAC configuration for a specific port.
 * @speed_mbps is an ethtool SPEED_* value (SPEED_UNKNOWN disables the
 * port until autoneg resolves). Returns 0 or a negative error code.
 */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps)
{
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like. For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* Translate the ethtool speed into the hardware encoding */
	switch (speed_mbps) {
	case SPEED_UNKNOWN:
		/* PHYLINK called sja1105_mac_config() to inform us about
		 * the state->interface, but AN has not completed and the
		 * speed is not yet valid. UM10944.pdf says that setting
		 * SJA1105_SPEED_AUTO at runtime disables the port, so that is
		 * ok for power consumption in case AN will never complete -
		 * otherwise PHYLINK should come back with a new update.
		 */
		speed = SJA1105_SPEED_AUTO;
		break;
	case SPEED_10:
		speed = SJA1105_SPEED_10MBPS;
		break;
	case SPEED_100:
		speed = SJA1105_SPEED_100MBPS;
		break;
	case SPEED_1000:
		speed = SJA1105_SPEED_1000MBPS;
		break;
	default:
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* Overwrite SJA1105_SPEED_AUTO from the static MAC configuration
	 * table, since this will be used for the clocking setup, and we no
	 * longer need to store it in the static config (already told hardware
	 * we want auto during upload phase).
	 * Actually for the SGMII port, the MAC is fixed at 1 Gbps and
	 * we need to configure the PCS only (if even that).
	 */
	if (sja1105_supports_sgmii(priv, port))
		mac[port].speed = SJA1105_SPEED_1000MBPS;
	else
		mac[port].speed = speed;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
					  &mac[port], true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}
1015
Vladimir Oltean39710222019-06-28 00:46:36 +03001016/* The SJA1105 MAC programming model is through the static config (the xMII
1017 * Mode table cannot be dynamically reconfigured), and we have to program
1018 * that early (earlier than PHYLINK calls us, anyway).
1019 * So just error out in case the connected PHY attempts to change the initial
1020 * system interface MII protocol from what is defined in the DT, at least for
1021 * now.
1022 */
1023static bool sja1105_phy_mode_mismatch(struct sja1105_private *priv, int port,
1024 phy_interface_t interface)
1025{
1026 struct sja1105_xmii_params_entry *mii;
1027 sja1105_phy_interface_t phy_mode;
1028
1029 mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
1030 phy_mode = mii->xmii_mode[port];
1031
1032 switch (interface) {
1033 case PHY_INTERFACE_MODE_MII:
1034 return (phy_mode != XMII_MODE_MII);
1035 case PHY_INTERFACE_MODE_RMII:
1036 return (phy_mode != XMII_MODE_RMII);
1037 case PHY_INTERFACE_MODE_RGMII:
1038 case PHY_INTERFACE_MODE_RGMII_ID:
1039 case PHY_INTERFACE_MODE_RGMII_RXID:
1040 case PHY_INTERFACE_MODE_RGMII_TXID:
1041 return (phy_mode != XMII_MODE_RGMII);
Vladimir Olteanffe10e62020-03-20 13:29:37 +02001042 case PHY_INTERFACE_MODE_SGMII:
1043 return (phy_mode != XMII_MODE_SGMII);
Vladimir Oltean39710222019-06-28 00:46:36 +03001044 default:
1045 return true;
1046 }
1047}
1048
/* PHYLINK MAC config callback. The xMII mode is fixed by the static config,
 * so all this can do is sanity-check the requested interface and, for the
 * SGMII-capable port, program the PCS (optionally with in-band autoneg).
 */
static void sja1105_mac_config(struct dsa_switch *ds, int port,
			       unsigned int mode,
			       const struct phylink_link_state *state)
{
	struct sja1105_private *priv = ds->priv;
	bool is_sgmii = sja1105_supports_sgmii(priv, port);

	/* The xMII mode cannot change at runtime; reject mismatches */
	if (sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		dev_err(ds->dev, "Changing PHY mode to %s not supported!\n",
			phy_modes(state->interface));
		return;
	}

	/* In-band autoneg is only possible on the SGMII port */
	if (phylink_autoneg_inband(mode) && !is_sgmii) {
		dev_err(ds->dev, "In-band AN not supported!\n");
		return;
	}

	if (is_sgmii)
		sja1105_sgmii_pcs_config(priv, phylink_autoneg_inband(mode),
					 false);
}
1071
1072static void sja1105_mac_link_down(struct dsa_switch *ds, int port,
1073 unsigned int mode,
1074 phy_interface_t interface)
1075{
1076 sja1105_inhibit_tx(ds->priv, BIT(port), true);
1077}
1078
/* PHYLINK callback: link is up - program the resolved speed into the MAC
 * config, force the PCS speed when SGMII is used without in-band AN, and
 * re-enable transmission on the port.
 */
static void sja1105_mac_link_up(struct dsa_switch *ds, int port,
				unsigned int mode,
				phy_interface_t interface,
				struct phy_device *phydev,
				int speed, int duplex,
				bool tx_pause, bool rx_pause)
{
	struct sja1105_private *priv = ds->priv;

	sja1105_adjust_port_config(priv, port, speed);

	/* Without in-band AN the PCS does not learn the speed by itself */
	if (sja1105_supports_sgmii(priv, port) && !phylink_autoneg_inband(mode))
		sja1105_sgmii_pcs_force_speed(priv, speed);

	sja1105_inhibit_tx(priv, BIT(port), false);
}
1095
/* PHYLINK callback: compute the link modes this port's MAC can do and
 * mask both @supported and @state->advertising down to them.
 */
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what will
	 * be sent to the PHY for "marketing".
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* include/linux/phylink.h says:
	 * When @state->interface is %PHY_INTERFACE_MODE_NA, phylink
	 * expects the MAC driver to return all supported link modes.
	 */
	if (state->interface != PHY_INTERFACE_MODE_NA &&
	    sja1105_phy_mode_mismatch(priv, port, state->interface)) {
		bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS);
		return;
	}

	/* The MAC does not support pause frames, and also doesn't
	 * support half-duplex traffic modes.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	phylink_set(mask, 100baseT1_Full);
	/* Only RGMII and SGMII system interfaces can run at gigabit */
	if (mii->xmii_mode[port] == XMII_MODE_RGMII ||
	    mii->xmii_mode[port] == XMII_MODE_SGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
1136
Vladimir Olteanffe10e62020-03-20 13:29:37 +02001137static int sja1105_mac_pcs_get_state(struct dsa_switch *ds, int port,
1138 struct phylink_link_state *state)
1139{
1140 struct sja1105_private *priv = ds->priv;
1141 int ais;
1142
1143 /* Read the vendor-specific AUTONEG_INTR_STATUS register */
1144 ais = sja1105_sgmii_read(priv, SJA1105_AIS);
1145 if (ais < 0)
1146 return ais;
1147
1148 switch (SJA1105_AIS_SPEED(ais)) {
1149 case 0:
1150 state->speed = SPEED_10;
1151 break;
1152 case 1:
1153 state->speed = SPEED_100;
1154 break;
1155 case 2:
1156 state->speed = SPEED_1000;
1157 break;
1158 default:
1159 dev_err(ds->dev, "Invalid SGMII PCS speed %lu\n",
1160 SJA1105_AIS_SPEED(ais));
1161 }
1162 state->duplex = SJA1105_AIS_DUPLEX_MODE(ais);
1163 state->an_complete = SJA1105_AIS_COMPLETE(ais);
1164 state->link = SJA1105_AIS_LINK_STATUS(ais);
1165
1166 return 0;
1167}
1168
Vladimir Oltean60f60532019-06-26 02:39:38 +03001169static int
1170sja1105_find_static_fdb_entry(struct sja1105_private *priv, int port,
1171 const struct sja1105_l2_lookup_entry *requested)
1172{
1173 struct sja1105_l2_lookup_entry *l2_lookup;
1174 struct sja1105_table *table;
1175 int i;
1176
1177 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
1178 l2_lookup = table->entries;
1179
1180 for (i = 0; i < table->entry_count; i++)
1181 if (l2_lookup[i].macaddr == requested->macaddr &&
1182 l2_lookup[i].vlanid == requested->vlanid &&
1183 l2_lookup[i].destports & BIT(port))
1184 return i;
1185
1186 return -1;
1187}
1188
/* We want FDB entries added statically through the bridge command to persist
 * across switch resets, which are a common thing during normal SJA1105
 * operation. So we have to back them up in the static configuration tables
 * and hence apply them on next static config upload... yay!
 * @keep selects between mirroring the entry into the table (true) and
 * removing this port from it / deleting it (false). Returns 0 or negative
 * errno from the table resize.
 */
static int
sja1105_static_fdb_change(struct sja1105_private *priv, int port,
			  const struct sja1105_l2_lookup_entry *requested,
			  bool keep)
{
	struct sja1105_l2_lookup_entry *l2_lookup;
	struct sja1105_table *table;
	int rc, match;

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];

	match = sja1105_find_static_fdb_entry(priv, port, requested);
	if (match < 0) {
		/* Can't delete a missing entry. */
		if (!keep)
			return 0;

		/* No match => new entry */
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;

		match = table->entry_count - 1;
	}

	/* Assign pointer after the resize (it may be new memory) */
	l2_lookup = table->entries;

	/* We have a match.
	 * If the job was to add this FDB entry, it's already done (mostly
	 * anyway, since the port forwarding mask may have changed, case in
	 * which we update it).
	 * Otherwise we have to delete it.
	 */
	if (keep) {
		l2_lookup[match] = *requested;
		return 0;
	}

	/* To remove, the strategy is to overwrite the element with
	 * the last one, and then reduce the array size by 1
	 */
	l2_lookup[match] = l2_lookup[table->entry_count - 1];
	return sja1105_table_resize(table, table->entry_count - 1);
}
1239
/* First-generation switches have a 4-way set associative TCAM that
 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
 * For the placement of a newly learnt FDB entry, the switch selects the bin
 * based on a hash function, and the way within that bin incrementally.
 */
static int sja1105et_fdb_index(int bin, int way)
{
	/* Linearize (bin, way) into the flat 0..1023 TCAM index space */
	return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
1250
/* Scan the ways of @bin for an entry matching @addr/@vid.
 * Returns the matching way (optionally copying the entry into @match),
 * or -1 if not found. If @last_unused is non-NULL, it is set to the last
 * way in the bin holding no valid entry (useful for placement).
 */
static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
					 const u8 *addr, u16 vid,
					 struct sja1105_l2_lookup_entry *match,
					 int *last_unused)
{
	int way;

	for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		int index = sja1105et_fdb_index(bin, way);

		/* Skip unused entries, optionally marking them
		 * into the return value
		 */
		if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						index, &l2_lookup)) {
			if (last_unused)
				*last_unused = way;
			continue;
		}

		if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
		    l2_lookup.vlanid == vid) {
			if (match)
				*match = l2_lookup;
			return way;
		}
	}
	/* Return an invalid entry index if not found */
	return -1;
}
1282
Vladimir Oltean9dfa6912019-06-03 00:11:57 +03001283int sja1105et_fdb_add(struct dsa_switch *ds, int port,
1284 const unsigned char *addr, u16 vid)
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001285{
1286 struct sja1105_l2_lookup_entry l2_lookup = {0};
1287 struct sja1105_private *priv = ds->priv;
1288 struct device *dev = ds->dev;
1289 int last_unused = -1;
Vladimir Oltean60f60532019-06-26 02:39:38 +03001290 int bin, way, rc;
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001291
Vladimir Oltean9dfa6912019-06-03 00:11:57 +03001292 bin = sja1105et_fdb_hash(priv, addr, vid);
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001293
Vladimir Oltean9dfa6912019-06-03 00:11:57 +03001294 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1295 &l2_lookup, &last_unused);
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001296 if (way >= 0) {
1297 /* We have an FDB entry. Is our port in the destination
1298 * mask? If yes, we need to do nothing. If not, we need
1299 * to rewrite the entry by adding this port to it.
1300 */
1301 if (l2_lookup.destports & BIT(port))
1302 return 0;
1303 l2_lookup.destports |= BIT(port);
1304 } else {
1305 int index = sja1105et_fdb_index(bin, way);
1306
1307 /* We don't have an FDB entry. We construct a new one and
1308 * try to find a place for it within the FDB table.
1309 */
1310 l2_lookup.macaddr = ether_addr_to_u64(addr);
1311 l2_lookup.destports = BIT(port);
1312 l2_lookup.vlanid = vid;
1313
1314 if (last_unused >= 0) {
1315 way = last_unused;
1316 } else {
1317 /* Bin is full, need to evict somebody.
1318 * Choose victim at random. If you get these messages
1319 * often, you may need to consider changing the
1320 * distribution function:
1321 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
1322 */
1323 get_random_bytes(&way, sizeof(u8));
1324 way %= SJA1105ET_FDB_BIN_SIZE;
1325 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
1326 bin, addr, way);
1327 /* Evict entry */
1328 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1329 index, NULL, false);
1330 }
1331 }
1332 l2_lookup.index = sja1105et_fdb_index(bin, way);
1333
Vladimir Oltean60f60532019-06-26 02:39:38 +03001334 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1335 l2_lookup.index, &l2_lookup,
1336 true);
1337 if (rc < 0)
1338 return rc;
1339
1340 return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001341}
1342
Vladimir Oltean9dfa6912019-06-03 00:11:57 +03001343int sja1105et_fdb_del(struct dsa_switch *ds, int port,
1344 const unsigned char *addr, u16 vid)
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001345{
1346 struct sja1105_l2_lookup_entry l2_lookup = {0};
1347 struct sja1105_private *priv = ds->priv;
Vladimir Oltean60f60532019-06-26 02:39:38 +03001348 int index, bin, way, rc;
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001349 bool keep;
1350
Vladimir Oltean9dfa6912019-06-03 00:11:57 +03001351 bin = sja1105et_fdb_hash(priv, addr, vid);
1352 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
1353 &l2_lookup, NULL);
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001354 if (way < 0)
1355 return 0;
1356 index = sja1105et_fdb_index(bin, way);
1357
1358 /* We have an FDB entry. Is our port in the destination mask? If yes,
1359 * we need to remove it. If the resulting port mask becomes empty, we
1360 * need to completely evict the FDB entry.
1361 * Otherwise we just write it back.
1362 */
Vladimir Oltean7752e932019-06-03 00:15:54 +03001363 l2_lookup.destports &= ~BIT(port);
1364
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001365 if (l2_lookup.destports)
1366 keep = true;
1367 else
1368 keep = false;
1369
Vladimir Oltean60f60532019-06-26 02:39:38 +03001370 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1371 index, &l2_lookup, keep);
1372 if (rc < 0)
1373 return rc;
1374
1375 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001376}
1377
/* Add @addr/@vid to the FDB of P/Q/R/S switches, which support searching
 * by MAC/VID through the dynamic config interface (SJA1105_SEARCH).
 * Returns 0 or a negative error code.
 */
int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int rc, i;

	/* Search for an existing entry in the FDB table */
	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	/* Only match on the VID when the switch is VLAN-aware */
	if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
		l2_lookup.mask_vlanid = VLAN_VID_MASK;
		l2_lookup.mask_iotag = BIT(0);
	} else {
		l2_lookup.mask_vlanid = 0;
		l2_lookup.mask_iotag = 0;
	}
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc == 0) {
		/* Found and this port is already in the entry's
		 * port mask => job done
		 */
		if (l2_lookup.destports & BIT(port))
			return 0;
		/* l2_lookup.index is populated by the switch in case it
		 * found something.
		 */
		l2_lookup.destports |= BIT(port);
		goto skip_finding_an_index;
	}

	/* Not found, so try to find an unused spot in the FDB.
	 * This is slightly inefficient because the strategy is knock-knock at
	 * every possible position from 0 to 1023.
	 */
	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, NULL);
		if (rc < 0)
			break;
	}
	if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
		dev_err(ds->dev, "FDB is full, cannot add entry.\n");
		return -EINVAL;
	}
	/* Mark static entries so the dump can tell them from learnt ones */
	l2_lookup.lockeds = true;
	l2_lookup.index = i;

skip_finding_an_index:
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					  l2_lookup.index, &l2_lookup,
					  true);
	if (rc < 0)
		return rc;

	/* Mirror the entry into the static config so it survives resets */
	return sja1105_static_fdb_change(priv, port, &l2_lookup, true);
}
1440
1441int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
1442 const unsigned char *addr, u16 vid)
1443{
Vladimir Oltean1da73822019-06-03 00:15:45 +03001444 struct sja1105_l2_lookup_entry l2_lookup = {0};
1445 struct sja1105_private *priv = ds->priv;
1446 bool keep;
1447 int rc;
1448
1449 l2_lookup.macaddr = ether_addr_to_u64(addr);
1450 l2_lookup.vlanid = vid;
1451 l2_lookup.iotag = SJA1105_S_TAG;
1452 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
Vladimir Oltean7f149372020-05-12 20:20:27 +03001453 if (priv->vlan_state != SJA1105_VLAN_UNAWARE) {
Vladimir Oltean6d7c7d92019-08-05 01:38:44 +03001454 l2_lookup.mask_vlanid = VLAN_VID_MASK;
1455 l2_lookup.mask_iotag = BIT(0);
1456 } else {
1457 l2_lookup.mask_vlanid = 0;
1458 l2_lookup.mask_iotag = 0;
1459 }
Vladimir Oltean1da73822019-06-03 00:15:45 +03001460 l2_lookup.destports = BIT(port);
1461
1462 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1463 SJA1105_SEARCH, &l2_lookup);
1464 if (rc < 0)
1465 return 0;
1466
1467 l2_lookup.destports &= ~BIT(port);
1468
1469 /* Decide whether we remove just this port from the FDB entry,
1470 * or if we remove it completely.
1471 */
1472 if (l2_lookup.destports)
1473 keep = true;
1474 else
1475 keep = false;
1476
Vladimir Oltean60f60532019-06-26 02:39:38 +03001477 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1478 l2_lookup.index, &l2_lookup, keep);
1479 if (rc < 0)
1480 return rc;
1481
1482 return sja1105_static_fdb_change(priv, port, &l2_lookup, keep);
Vladimir Oltean9dfa6912019-06-03 00:11:57 +03001483}
1484
1485static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1486 const unsigned char *addr, u16 vid)
1487{
1488 struct sja1105_private *priv = ds->priv;
Vladimir Olteanb3ee5262019-06-26 02:39:41 +03001489
Vladimir Oltean6d7c7d92019-08-05 01:38:44 +03001490 /* dsa_8021q is in effect when the bridge's vlan_filtering isn't,
1491 * so the switch still does some VLAN processing internally.
1492 * But Shared VLAN Learning (SVL) is also active, and it will take
1493 * care of autonomous forwarding between the unique pvid's of each
1494 * port. Here we just make sure that users can't add duplicate FDB
1495 * entries when in this mode - the actual VID doesn't matter except
1496 * for what gets printed in 'bridge fdb show'. In the case of zero,
1497 * no VID gets printed at all.
Vladimir Oltean93647592019-06-03 00:16:01 +03001498 */
Vladimir Oltean7f149372020-05-12 20:20:27 +03001499 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
Vladimir Oltean6d7c7d92019-08-05 01:38:44 +03001500 vid = 0;
Vladimir Oltean93647592019-06-03 00:16:01 +03001501
Vladimir Oltean6d7c7d92019-08-05 01:38:44 +03001502 return priv->info->fdb_add_cmd(ds, port, addr, vid);
Vladimir Oltean9dfa6912019-06-03 00:11:57 +03001503}
1504
1505static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1506 const unsigned char *addr, u16 vid)
1507{
1508 struct sja1105_private *priv = ds->priv;
1509
Vladimir Oltean7f149372020-05-12 20:20:27 +03001510 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL)
Vladimir Oltean6d7c7d92019-08-05 01:38:44 +03001511 vid = 0;
Vladimir Oltean93647592019-06-03 00:16:01 +03001512
Vladimir Oltean6d7c7d92019-08-05 01:38:44 +03001513 return priv->info->fdb_del_cmd(ds, port, addr, vid);
Vladimir Oltean9dfa6912019-06-03 00:11:57 +03001514}
1515
/* DSA op: walk all hardware FDB entries and report, via @cb, the ones that
 * include @port in their destination mask. Returns 0 or a negative error.
 */
static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
			    dsa_fdb_dump_cb_t *cb, void *data)
{
	struct sja1105_private *priv = ds->priv;
	struct device *dev = ds->dev;
	int i;

	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		u8 macaddr[ETH_ALEN];
		int rc;

		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, &l2_lookup);
		/* No fdb entry at i, not an issue */
		if (rc == -ENOENT)
			continue;
		if (rc) {
			dev_err(dev, "Failed to dump FDB: %d\n", rc);
			return rc;
		}

		/* FDB dump callback is per port. This means we have to
		 * disregard a valid entry if it's not for this port, even if
		 * only to revisit it later. This is inefficient because the
		 * 1024-sized FDB table needs to be traversed 4 times through
		 * SPI during a 'bridge fdb show' command.
		 */
		if (!(l2_lookup.destports & BIT(port)))
			continue;

		/* We need to hide the FDB entry for unknown multicast */
		if (l2_lookup.macaddr == SJA1105_UNKNOWN_MULTICAST &&
		    l2_lookup.mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
			continue;

		u64_to_ether_addr(l2_lookup.macaddr, macaddr);

		/* We need to hide the dsa_8021q VLANs from the user. */
		if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
			l2_lookup.vlanid = 0;
		cb(macaddr, l2_lookup.vlanid, l2_lookup.lockeds, data);
	}
	return 0;
}
1561
/* DSA op: multicast entries share the FDB with unicast ones, so an MDB
 * add is simply an FDB add of the multicast address.
 */
static int sja1105_mdb_add(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}
1567
/* DSA op: MDB delete mirrors sja1105_mdb_add() - just an FDB delete */
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}
1573
/* Add (@member=true) or remove (@member=false) @port to/from the L2
 * forwarding domain of every other user port under the same bridge @br,
 * committing the updated L2 Forwarding table rows over SPI.
 * Returns 0 or a negative error code.
 */
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
				 struct net_device *br, bool member)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct sja1105_private *priv = ds->priv;
	int i, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		/* Add this port to the forwarding matrix of the
		 * other ports in the same bridge, and viceversa.
		 */
		if (!dsa_is_user_port(ds, i))
			continue;
		/* For the ports already under the bridge, only one thing needs
		 * to be done, and that is to add this port to their
		 * reachability domain. So we can perform the SPI write for
		 * them immediately. However, for this port itself (the one
		 * that is new to the bridge), we need to add all other ports
		 * to its reachability domain. So we do that incrementally in
		 * this loop, and perform the SPI write only at the end, once
		 * the domain contains all other bridge ports.
		 */
		if (i == port)
			continue;
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		sja1105_port_allow_traffic(l2_fwd, i, port, member);
		sja1105_port_allow_traffic(l2_fwd, port, i, member);

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  i, &l2_fwd[i], true);
		if (rc < 0)
			return rc;
	}

	/* Finally commit this port's own row, now covering all peers */
	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
					    port, &l2_fwd[port], true);
}
1614
Vladimir Oltean640f7632019-05-05 13:19:28 +03001615static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
1616 u8 state)
1617{
1618 struct sja1105_private *priv = ds->priv;
1619 struct sja1105_mac_config_entry *mac;
1620
1621 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1622
1623 switch (state) {
1624 case BR_STATE_DISABLED:
1625 case BR_STATE_BLOCKING:
1626 /* From UM10944 description of DRPDTAG (why put this there?):
1627 * "Management traffic flows to the port regardless of the state
1628 * of the INGRESS flag". So BPDUs are still be allowed to pass.
1629 * At the moment no difference between DISABLED and BLOCKING.
1630 */
1631 mac[port].ingress = false;
1632 mac[port].egress = false;
1633 mac[port].dyn_learn = false;
1634 break;
1635 case BR_STATE_LISTENING:
1636 mac[port].ingress = true;
1637 mac[port].egress = false;
1638 mac[port].dyn_learn = false;
1639 break;
1640 case BR_STATE_LEARNING:
1641 mac[port].ingress = true;
1642 mac[port].egress = false;
Vladimir Oltean4d942352021-02-12 17:16:00 +02001643 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
Vladimir Oltean640f7632019-05-05 13:19:28 +03001644 break;
1645 case BR_STATE_FORWARDING:
1646 mac[port].ingress = true;
1647 mac[port].egress = true;
Vladimir Oltean4d942352021-02-12 17:16:00 +02001648 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
Vladimir Oltean640f7632019-05-05 13:19:28 +03001649 break;
1650 default:
1651 dev_err(ds->dev, "invalid STP state: %d\n", state);
1652 return;
1653 }
1654
1655 sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1656 &mac[port], true);
1657}
1658
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001659static int sja1105_bridge_join(struct dsa_switch *ds, int port,
1660 struct net_device *br)
1661{
1662 return sja1105_bridge_member(ds, port, br, true);
1663}
1664
/* DSA callback: @port left bridge @br. Tear down forwarding between @port
 * and the remaining bridge ports. The DSA core provides no way to report
 * errors here, so the return code of sja1105_bridge_member() is dropped.
 */
static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
				 struct net_device *br)
{
	sja1105_bridge_member(ds, port, br, false);
}
1670
Vladimir Oltean4d752502020-05-28 03:27:58 +03001671#define BYTES_PER_KBIT (1000LL / 8)
1672
1673static int sja1105_find_unused_cbs_shaper(struct sja1105_private *priv)
1674{
1675 int i;
1676
1677 for (i = 0; i < priv->info->num_cbs_shapers; i++)
1678 if (!priv->cbs[i].idle_slope && !priv->cbs[i].send_slope)
1679 return i;
1680
1681 return -1;
1682}
1683
1684static int sja1105_delete_cbs_shaper(struct sja1105_private *priv, int port,
1685 int prio)
1686{
1687 int i;
1688
1689 for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1690 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1691
1692 if (cbs->port == port && cbs->prio == prio) {
1693 memset(cbs, 0, sizeof(*cbs));
1694 return sja1105_dynamic_config_write(priv, BLK_IDX_CBS,
1695 i, cbs, true);
1696 }
1697 }
1698
1699 return 0;
1700}
1701
1702static int sja1105_setup_tc_cbs(struct dsa_switch *ds, int port,
1703 struct tc_cbs_qopt_offload *offload)
1704{
1705 struct sja1105_private *priv = ds->priv;
1706 struct sja1105_cbs_entry *cbs;
1707 int index;
1708
1709 if (!offload->enable)
1710 return sja1105_delete_cbs_shaper(priv, port, offload->queue);
1711
1712 index = sja1105_find_unused_cbs_shaper(priv);
1713 if (index < 0)
1714 return -ENOSPC;
1715
1716 cbs = &priv->cbs[index];
1717 cbs->port = port;
1718 cbs->prio = offload->queue;
1719 /* locredit and sendslope are negative by definition. In hardware,
1720 * positive values must be provided, and the negative sign is implicit.
1721 */
1722 cbs->credit_hi = offload->hicredit;
1723 cbs->credit_lo = abs(offload->locredit);
1724 /* User space is in kbits/sec, hardware in bytes/sec */
1725 cbs->idle_slope = offload->idleslope * BYTES_PER_KBIT;
1726 cbs->send_slope = abs(offload->sendslope * BYTES_PER_KBIT);
1727 /* Convert the negative values from 64-bit 2's complement
1728 * to 32-bit 2's complement (for the case of 0x80000000 whose
1729 * negative is still negative).
1730 */
1731 cbs->credit_lo &= GENMASK_ULL(31, 0);
1732 cbs->send_slope &= GENMASK_ULL(31, 0);
1733
1734 return sja1105_dynamic_config_write(priv, BLK_IDX_CBS, index, cbs,
1735 true);
1736}
1737
1738static int sja1105_reload_cbs(struct sja1105_private *priv)
1739{
1740 int rc = 0, i;
1741
1742 for (i = 0; i < priv->info->num_cbs_shapers; i++) {
1743 struct sja1105_cbs_entry *cbs = &priv->cbs[i];
1744
1745 if (!cbs->idle_slope && !cbs->send_slope)
1746 continue;
1747
1748 rc = sja1105_dynamic_config_write(priv, BLK_IDX_CBS, i, cbs,
1749 true);
1750 if (rc)
1751 break;
1752 }
1753
1754 return rc;
1755}
1756
Vladimir Oltean2eea1fa2019-11-12 23:22:00 +02001757static const char * const sja1105_reset_reasons[] = {
1758 [SJA1105_VLAN_FILTERING] = "VLAN filtering",
1759 [SJA1105_RX_HWTSTAMPING] = "RX timestamping",
1760 [SJA1105_AGEING_TIME] = "Ageing time",
1761 [SJA1105_SCHEDULING] = "Time-aware scheduling",
Vladimir Olteanc279c722020-03-27 21:55:45 +02001762 [SJA1105_BEST_EFFORT_POLICING] = "Best-effort policing",
Vladimir Olteandfacc5a2020-05-05 22:20:55 +03001763 [SJA1105_VIRTUAL_LINKS] = "Virtual links",
Vladimir Oltean2eea1fa2019-11-12 23:22:00 +02001764};
1765
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001766/* For situations where we need to change a setting at runtime that is only
1767 * available through the static configuration, resetting the switch in order
1768 * to upload the new static config is unavoidable. Back up the settings we
1769 * modify at runtime (currently only MAC) and restore them after uploading,
1770 * such that this operation is relatively seamless.
1771 */
Vladimir Oltean2eea1fa2019-11-12 23:22:00 +02001772int sja1105_static_config_reload(struct sja1105_private *priv,
1773 enum sja1105_reset_reason reason)
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001774{
Vladimir Oltean6cf99c12019-11-09 13:32:23 +02001775 struct ptp_system_timestamp ptp_sts_before;
1776 struct ptp_system_timestamp ptp_sts_after;
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001777 struct sja1105_mac_config_entry *mac;
1778 int speed_mbps[SJA1105_NUM_PORTS];
Vladimir Oltean6cf99c12019-11-09 13:32:23 +02001779 struct dsa_switch *ds = priv->ds;
1780 s64 t1, t2, t3, t4;
1781 s64 t12, t34;
Vladimir Olteanffe10e62020-03-20 13:29:37 +02001782 u16 bmcr = 0;
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001783 int rc, i;
Vladimir Oltean6cf99c12019-11-09 13:32:23 +02001784 s64 now;
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001785
Vladimir Olteanaf580ae2019-11-09 13:32:24 +02001786 mutex_lock(&priv->mgmt_lock);
1787
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001788 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1789
Vladimir Oltean8400cff2019-06-08 16:03:44 +03001790 /* Back up the dynamic link speed changed by sja1105_adjust_port_config
1791 * in order to temporarily restore it to SJA1105_SPEED_AUTO - which the
1792 * switch wants to see in the static config in order to allow us to
1793 * change it through the dynamic interface later.
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001794 */
1795 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1796 speed_mbps[i] = sja1105_speed[mac[i].speed];
1797 mac[i].speed = SJA1105_SPEED_AUTO;
1798 }
1799
Vladimir Olteanffe10e62020-03-20 13:29:37 +02001800 if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT))
1801 bmcr = sja1105_sgmii_read(priv, MII_BMCR);
1802
Vladimir Oltean6cf99c12019-11-09 13:32:23 +02001803 /* No PTP operations can run right now */
1804 mutex_lock(&priv->ptp_data.lock);
1805
1806 rc = __sja1105_ptp_gettimex(ds, &now, &ptp_sts_before);
1807 if (rc < 0)
1808 goto out_unlock_ptp;
1809
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001810 /* Reset switch and send updated static configuration */
1811 rc = sja1105_static_config_upload(priv);
1812 if (rc < 0)
Vladimir Oltean6cf99c12019-11-09 13:32:23 +02001813 goto out_unlock_ptp;
1814
1815 rc = __sja1105_ptp_settime(ds, 0, &ptp_sts_after);
1816 if (rc < 0)
1817 goto out_unlock_ptp;
1818
1819 t1 = timespec64_to_ns(&ptp_sts_before.pre_ts);
1820 t2 = timespec64_to_ns(&ptp_sts_before.post_ts);
1821 t3 = timespec64_to_ns(&ptp_sts_after.pre_ts);
1822 t4 = timespec64_to_ns(&ptp_sts_after.post_ts);
1823 /* Mid point, corresponds to pre-reset PTPCLKVAL */
1824 t12 = t1 + (t2 - t1) / 2;
1825 /* Mid point, corresponds to post-reset PTPCLKVAL, aka 0 */
1826 t34 = t3 + (t4 - t3) / 2;
1827 /* Advance PTPCLKVAL by the time it took since its readout */
1828 now += (t34 - t12);
1829
1830 __sja1105_ptp_adjtime(ds, now);
1831
1832out_unlock_ptp:
1833 mutex_unlock(&priv->ptp_data.lock);
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001834
Vladimir Oltean2eea1fa2019-11-12 23:22:00 +02001835 dev_info(priv->ds->dev,
1836 "Reset switch and programmed static config. Reason: %s\n",
1837 sja1105_reset_reasons[reason]);
1838
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001839 /* Configure the CGU (PLLs) for MII and RMII PHYs.
1840 * For these interfaces there is no dynamic configuration
1841 * needed, since PLLs have same settings at all speeds.
1842 */
1843 rc = sja1105_clocking_setup(priv);
1844 if (rc < 0)
1845 goto out;
1846
1847 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
Vladimir Oltean8400cff2019-06-08 16:03:44 +03001848 rc = sja1105_adjust_port_config(priv, i, speed_mbps[i]);
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001849 if (rc < 0)
1850 goto out;
1851 }
Vladimir Olteanffe10e62020-03-20 13:29:37 +02001852
1853 if (sja1105_supports_sgmii(priv, SJA1105_SGMII_PORT)) {
1854 bool an_enabled = !!(bmcr & BMCR_ANENABLE);
1855
1856 sja1105_sgmii_pcs_config(priv, an_enabled, false);
1857
1858 if (!an_enabled) {
1859 int speed = SPEED_UNKNOWN;
1860
1861 if (bmcr & BMCR_SPEED1000)
1862 speed = SPEED_1000;
1863 else if (bmcr & BMCR_SPEED100)
1864 speed = SPEED_100;
1865 else if (bmcr & BMCR_SPEED10)
1866 speed = SPEED_10;
1867
1868 sja1105_sgmii_pcs_force_speed(priv, speed);
1869 }
1870 }
Vladimir Oltean4d752502020-05-28 03:27:58 +03001871
1872 rc = sja1105_reload_cbs(priv);
1873 if (rc < 0)
1874 goto out;
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001875out:
Vladimir Olteanaf580ae2019-11-09 13:32:24 +02001876 mutex_unlock(&priv->mgmt_lock);
1877
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001878 return rc;
1879}
1880
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001881static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
1882{
1883 struct sja1105_mac_config_entry *mac;
1884
1885 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1886
1887 mac[port].vlanid = pvid;
1888
1889 return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1890 &mac[port], true);
1891}
1892
Vladimir Olteanac02a452020-05-10 19:37:43 +03001893static int sja1105_crosschip_bridge_join(struct dsa_switch *ds,
1894 int tree_index, int sw_index,
1895 int other_port, struct net_device *br)
1896{
1897 struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
1898 struct sja1105_private *other_priv = other_ds->priv;
1899 struct sja1105_private *priv = ds->priv;
1900 int port, rc;
1901
1902 if (other_ds->ops != &sja1105_switch_ops)
1903 return 0;
1904
1905 for (port = 0; port < ds->num_ports; port++) {
1906 if (!dsa_is_user_port(ds, port))
1907 continue;
1908 if (dsa_to_port(ds, port)->bridge_dev != br)
1909 continue;
1910
Vladimir Oltean5899ee32020-09-10 19:48:56 +03001911 rc = dsa_8021q_crosschip_bridge_join(priv->dsa_8021q_ctx,
1912 port,
1913 other_priv->dsa_8021q_ctx,
1914 other_port);
Vladimir Olteanac02a452020-05-10 19:37:43 +03001915 if (rc)
1916 return rc;
1917
Vladimir Oltean5899ee32020-09-10 19:48:56 +03001918 rc = dsa_8021q_crosschip_bridge_join(other_priv->dsa_8021q_ctx,
1919 other_port,
1920 priv->dsa_8021q_ctx,
1921 port);
Vladimir Olteanac02a452020-05-10 19:37:43 +03001922 if (rc)
1923 return rc;
1924 }
1925
1926 return 0;
1927}
1928
1929static void sja1105_crosschip_bridge_leave(struct dsa_switch *ds,
1930 int tree_index, int sw_index,
1931 int other_port,
1932 struct net_device *br)
1933{
1934 struct dsa_switch *other_ds = dsa_switch_find(tree_index, sw_index);
1935 struct sja1105_private *other_priv = other_ds->priv;
1936 struct sja1105_private *priv = ds->priv;
1937 int port;
1938
1939 if (other_ds->ops != &sja1105_switch_ops)
1940 return;
1941
1942 for (port = 0; port < ds->num_ports; port++) {
1943 if (!dsa_is_user_port(ds, port))
1944 continue;
1945 if (dsa_to_port(ds, port)->bridge_dev != br)
1946 continue;
1947
Vladimir Oltean5899ee32020-09-10 19:48:56 +03001948 dsa_8021q_crosschip_bridge_leave(priv->dsa_8021q_ctx, port,
1949 other_priv->dsa_8021q_ctx,
1950 other_port);
Vladimir Olteanac02a452020-05-10 19:37:43 +03001951
Vladimir Oltean5899ee32020-09-10 19:48:56 +03001952 dsa_8021q_crosschip_bridge_leave(other_priv->dsa_8021q_ctx,
1953 other_port,
1954 priv->dsa_8021q_ctx, port);
Vladimir Olteanac02a452020-05-10 19:37:43 +03001955 }
1956}
1957
Vladimir Oltean227d07a2019-05-05 13:19:27 +03001958static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
1959{
Vladimir Oltean60b33ae2020-05-12 20:20:28 +03001960 struct sja1105_private *priv = ds->priv;
Vladimir Oltean7e092af2020-09-10 19:48:55 +03001961 int rc;
Vladimir Oltean227d07a2019-05-05 13:19:27 +03001962
Vladimir Oltean5899ee32020-09-10 19:48:56 +03001963 rc = dsa_8021q_setup(priv->dsa_8021q_ctx, enabled);
Vladimir Oltean7e092af2020-09-10 19:48:55 +03001964 if (rc)
1965 return rc;
Vladimir Olteanac02a452020-05-10 19:37:43 +03001966
Vladimir Oltean227d07a2019-05-05 13:19:27 +03001967 dev_info(ds->dev, "%s switch tagging\n",
1968 enabled ? "Enabled" : "Disabled");
1969 return 0;
1970}
1971
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001972static enum dsa_tag_protocol
Florian Fainelli4d776482020-01-07 21:06:05 -08001973sja1105_get_tag_protocol(struct dsa_switch *ds, int port,
1974 enum dsa_tag_protocol mp)
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001975{
Vladimir Oltean227d07a2019-05-05 13:19:27 +03001976 return DSA_TAG_PROTO_SJA1105;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001977}
1978
Vladimir Oltean3f01c912020-05-12 20:20:38 +03001979static int sja1105_find_free_subvlan(u16 *subvlan_map, bool pvid)
1980{
1981 int subvlan;
1982
1983 if (pvid)
1984 return 0;
1985
1986 for (subvlan = 1; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
1987 if (subvlan_map[subvlan] == VLAN_N_VID)
1988 return subvlan;
1989
1990 return -1;
1991}
1992
1993static int sja1105_find_subvlan(u16 *subvlan_map, u16 vid)
1994{
1995 int subvlan;
1996
1997 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
1998 if (subvlan_map[subvlan] == vid)
1999 return subvlan;
2000
2001 return -1;
2002}
2003
2004static int sja1105_find_committed_subvlan(struct sja1105_private *priv,
2005 int port, u16 vid)
2006{
2007 struct sja1105_port *sp = &priv->ports[port];
2008
2009 return sja1105_find_subvlan(sp->subvlan_map, vid);
2010}
2011
2012static void sja1105_init_subvlan_map(u16 *subvlan_map)
2013{
2014 int subvlan;
2015
2016 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2017 subvlan_map[subvlan] = VLAN_N_VID;
2018}
2019
2020static void sja1105_commit_subvlan_map(struct sja1105_private *priv, int port,
2021 u16 *subvlan_map)
2022{
2023 struct sja1105_port *sp = &priv->ports[port];
2024 int subvlan;
2025
2026 for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
2027 sp->subvlan_map[subvlan] = subvlan_map[subvlan];
2028}
2029
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002030static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
2031{
2032 struct sja1105_vlan_lookup_entry *vlan;
2033 int count, i;
2034
2035 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
2036 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
2037
2038 for (i = 0; i < count; i++)
2039 if (vlan[i].vlanid == vid)
2040 return i;
2041
2042 /* Return an invalid entry index if not found */
2043 return -1;
2044}
2045
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002046static int
2047sja1105_find_retagging_entry(struct sja1105_retagging_entry *retagging,
2048 int count, int from_port, u16 from_vid,
2049 u16 to_vid)
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002050{
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002051 int i;
2052
2053 for (i = 0; i < count; i++)
2054 if (retagging[i].ing_port == BIT(from_port) &&
2055 retagging[i].vlan_ing == from_vid &&
2056 retagging[i].vlan_egr == to_vid)
2057 return i;
2058
2059 /* Return an invalid entry index if not found */
2060 return -1;
2061}
2062
2063static int sja1105_commit_vlans(struct sja1105_private *priv,
2064 struct sja1105_vlan_lookup_entry *new_vlan,
2065 struct sja1105_retagging_entry *new_retagging,
2066 int num_retagging)
2067{
2068 struct sja1105_retagging_entry *retagging;
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002069 struct sja1105_vlan_lookup_entry *vlan;
2070 struct sja1105_table *table;
2071 int num_vlans = 0;
2072 int rc, i, k = 0;
2073
2074 /* VLAN table */
2075 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2076 vlan = table->entries;
2077
2078 for (i = 0; i < VLAN_N_VID; i++) {
2079 int match = sja1105_is_vlan_configured(priv, i);
2080
2081 if (new_vlan[i].vlanid != VLAN_N_VID)
2082 num_vlans++;
2083
2084 if (new_vlan[i].vlanid == VLAN_N_VID && match >= 0) {
2085 /* Was there before, no longer is. Delete */
2086 dev_dbg(priv->ds->dev, "Deleting VLAN %d\n", i);
2087 rc = sja1105_dynamic_config_write(priv,
2088 BLK_IDX_VLAN_LOOKUP,
2089 i, &vlan[match], false);
2090 if (rc < 0)
2091 return rc;
2092 } else if (new_vlan[i].vlanid != VLAN_N_VID) {
2093 /* Nothing changed, don't do anything */
2094 if (match >= 0 &&
2095 vlan[match].vlanid == new_vlan[i].vlanid &&
2096 vlan[match].tag_port == new_vlan[i].tag_port &&
2097 vlan[match].vlan_bc == new_vlan[i].vlan_bc &&
2098 vlan[match].vmemb_port == new_vlan[i].vmemb_port)
2099 continue;
2100 /* Update entry */
2101 dev_dbg(priv->ds->dev, "Updating VLAN %d\n", i);
2102 rc = sja1105_dynamic_config_write(priv,
2103 BLK_IDX_VLAN_LOOKUP,
2104 i, &new_vlan[i],
2105 true);
2106 if (rc < 0)
2107 return rc;
2108 }
2109 }
2110
2111 if (table->entry_count)
2112 kfree(table->entries);
2113
2114 table->entries = kcalloc(num_vlans, table->ops->unpacked_entry_size,
2115 GFP_KERNEL);
2116 if (!table->entries)
2117 return -ENOMEM;
2118
2119 table->entry_count = num_vlans;
2120 vlan = table->entries;
2121
2122 for (i = 0; i < VLAN_N_VID; i++) {
2123 if (new_vlan[i].vlanid == VLAN_N_VID)
2124 continue;
2125 vlan[k++] = new_vlan[i];
2126 }
2127
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002128 /* VLAN Retagging Table */
2129 table = &priv->static_config.tables[BLK_IDX_RETAGGING];
2130 retagging = table->entries;
2131
2132 for (i = 0; i < table->entry_count; i++) {
2133 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2134 i, &retagging[i], false);
2135 if (rc)
2136 return rc;
2137 }
2138
2139 if (table->entry_count)
2140 kfree(table->entries);
2141
2142 table->entries = kcalloc(num_retagging, table->ops->unpacked_entry_size,
2143 GFP_KERNEL);
2144 if (!table->entries)
2145 return -ENOMEM;
2146
2147 table->entry_count = num_retagging;
2148 retagging = table->entries;
2149
2150 for (i = 0; i < num_retagging; i++) {
2151 retagging[i] = new_retagging[i];
2152
2153 /* Update entry */
2154 rc = sja1105_dynamic_config_write(priv, BLK_IDX_RETAGGING,
2155 i, &retagging[i], true);
2156 if (rc < 0)
2157 return rc;
2158 }
2159
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002160 return 0;
2161}
2162
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002163struct sja1105_crosschip_vlan {
2164 struct list_head list;
2165 u16 vid;
2166 bool untagged;
2167 int port;
2168 int other_port;
Vladimir Oltean5899ee32020-09-10 19:48:56 +03002169 struct dsa_8021q_context *other_ctx;
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002170};
2171
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002172struct sja1105_crosschip_switch {
2173 struct list_head list;
Vladimir Oltean5899ee32020-09-10 19:48:56 +03002174 struct dsa_8021q_context *other_ctx;
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002175};
2176
2177static int sja1105_commit_pvid(struct sja1105_private *priv)
2178{
2179 struct sja1105_bridge_vlan *v;
2180 struct list_head *vlan_list;
2181 int rc = 0;
2182
2183 if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2184 vlan_list = &priv->bridge_vlans;
2185 else
2186 vlan_list = &priv->dsa_8021q_vlans;
2187
2188 list_for_each_entry(v, vlan_list, list) {
2189 if (v->pvid) {
2190 rc = sja1105_pvid_apply(priv, v->port, v->vid);
2191 if (rc)
2192 break;
2193 }
2194 }
2195
2196 return rc;
2197}
2198
2199static int
2200sja1105_build_bridge_vlans(struct sja1105_private *priv,
2201 struct sja1105_vlan_lookup_entry *new_vlan)
2202{
2203 struct sja1105_bridge_vlan *v;
2204
2205 if (priv->vlan_state == SJA1105_VLAN_UNAWARE)
2206 return 0;
2207
2208 list_for_each_entry(v, &priv->bridge_vlans, list) {
2209 int match = v->vid;
2210
2211 new_vlan[match].vlanid = v->vid;
2212 new_vlan[match].vmemb_port |= BIT(v->port);
2213 new_vlan[match].vlan_bc |= BIT(v->port);
2214 if (!v->untagged)
2215 new_vlan[match].tag_port |= BIT(v->port);
2216 }
2217
2218 return 0;
2219}
2220
2221static int
2222sja1105_build_dsa_8021q_vlans(struct sja1105_private *priv,
2223 struct sja1105_vlan_lookup_entry *new_vlan)
2224{
2225 struct sja1105_bridge_vlan *v;
2226
2227 if (priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
2228 return 0;
2229
2230 list_for_each_entry(v, &priv->dsa_8021q_vlans, list) {
2231 int match = v->vid;
2232
2233 new_vlan[match].vlanid = v->vid;
2234 new_vlan[match].vmemb_port |= BIT(v->port);
2235 new_vlan[match].vlan_bc |= BIT(v->port);
2236 if (!v->untagged)
2237 new_vlan[match].tag_port |= BIT(v->port);
2238 }
2239
2240 return 0;
2241}
2242
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002243static int sja1105_build_subvlans(struct sja1105_private *priv,
2244 u16 subvlan_map[][DSA_8021Q_N_SUBVLAN],
2245 struct sja1105_vlan_lookup_entry *new_vlan,
2246 struct sja1105_retagging_entry *new_retagging,
2247 int *num_retagging)
2248{
2249 struct sja1105_bridge_vlan *v;
2250 int k = *num_retagging;
2251
2252 if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
2253 return 0;
2254
2255 list_for_each_entry(v, &priv->bridge_vlans, list) {
2256 int upstream = dsa_upstream_port(priv->ds, v->port);
2257 int match, subvlan;
2258 u16 rx_vid;
2259
2260 /* Only sub-VLANs on user ports need to be applied.
2261 * Bridge VLANs also include VLANs added automatically
2262 * by DSA on the CPU port.
2263 */
2264 if (!dsa_is_user_port(priv->ds, v->port))
2265 continue;
2266
2267 subvlan = sja1105_find_subvlan(subvlan_map[v->port],
2268 v->vid);
2269 if (subvlan < 0) {
2270 subvlan = sja1105_find_free_subvlan(subvlan_map[v->port],
2271 v->pvid);
2272 if (subvlan < 0) {
2273 dev_err(priv->ds->dev, "No more free subvlans\n");
2274 return -ENOSPC;
2275 }
2276 }
2277
2278 rx_vid = dsa_8021q_rx_vid_subvlan(priv->ds, v->port, subvlan);
2279
2280 /* @v->vid on @v->port needs to be retagged to @rx_vid
2281 * on @upstream. Assume @v->vid on @v->port and on
2282 * @upstream was already configured by the previous
2283 * iteration over bridge_vlans.
2284 */
2285 match = rx_vid;
2286 new_vlan[match].vlanid = rx_vid;
2287 new_vlan[match].vmemb_port |= BIT(v->port);
2288 new_vlan[match].vmemb_port |= BIT(upstream);
2289 new_vlan[match].vlan_bc |= BIT(v->port);
2290 new_vlan[match].vlan_bc |= BIT(upstream);
2291 /* The "untagged" flag is set the same as for the
2292 * original VLAN
2293 */
2294 if (!v->untagged)
2295 new_vlan[match].tag_port |= BIT(v->port);
2296 /* But it's always tagged towards the CPU */
2297 new_vlan[match].tag_port |= BIT(upstream);
2298
2299 /* The Retagging Table generates packet *clones* with
2300 * the new VLAN. This is a very odd hardware quirk
2301 * which we need to suppress by dropping the original
2302 * packet.
2303 * Deny egress of the original VLAN towards the CPU
2304 * port. This will force the switch to drop it, and
2305 * we'll see only the retagged packets.
2306 */
2307 match = v->vid;
2308 new_vlan[match].vlan_bc &= ~BIT(upstream);
2309
2310 /* And the retagging itself */
2311 new_retagging[k].vlan_ing = v->vid;
2312 new_retagging[k].vlan_egr = rx_vid;
2313 new_retagging[k].ing_port = BIT(v->port);
2314 new_retagging[k].egr_port = BIT(upstream);
2315 if (k++ == SJA1105_MAX_RETAGGING_COUNT) {
2316 dev_err(priv->ds->dev, "No more retagging rules\n");
2317 return -ENOSPC;
2318 }
2319
2320 subvlan_map[v->port][subvlan] = v->vid;
2321 }
2322
2323 *num_retagging = k;
2324
2325 return 0;
2326}
2327
/* Sadly, in crosschip scenarios where the CPU port is also the link to another
 * switch, we should retag backwards (the dsa_8021q vid to the original vid) on
 * the CPU port of neighbour switches.
 *
 * Two passes: first collect (into a temporary list) the distinct VLANs that
 * exist both on a local user port and on the remote end of a crosschip link;
 * then, for each, install the remote sub-VLAN rx_vid locally and add a
 * reverse retagging rule (rx_vid -> original vid) on the upstream port.
 * Only relevant in best-effort VLAN filtering mode.
 */
static int
sja1105_build_crosschip_subvlans(struct sja1105_private *priv,
				 struct sja1105_vlan_lookup_entry *new_vlan,
				 struct sja1105_retagging_entry *new_retagging,
				 int *num_retagging)
{
	struct sja1105_crosschip_vlan *tmp, *pos;
	struct dsa_8021q_crosschip_link *c;
	struct sja1105_bridge_vlan *v, *w;
	struct list_head crosschip_vlans;
	int k = *num_retagging;
	int rc = 0;

	if (priv->vlan_state != SJA1105_VLAN_BEST_EFFORT)
		return 0;

	INIT_LIST_HEAD(&crosschip_vlans);

	list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
		struct sja1105_private *other_priv = c->other_ctx->ds->priv;

		/* A fully VLAN-aware neighbour does not use sub-VLANs */
		if (other_priv->vlan_state == SJA1105_VLAN_FILTERING_FULL)
			continue;

		/* Crosschip links are also added to the CPU ports.
		 * Ignore those.
		 */
		if (!dsa_is_user_port(priv->ds, c->port))
			continue;
		if (!dsa_is_user_port(c->other_ctx->ds, c->other_port))
			continue;

		/* Search for VLANs on the remote port */
		list_for_each_entry(v, &other_priv->bridge_vlans, list) {
			bool already_added = false;
			bool we_have_it = false;

			if (v->port != c->other_port)
				continue;

			/* If @v is a pvid on @other_ds, it does not need
			 * re-retagging, because its SVL field is 0 and we
			 * already allow that, via the dsa_8021q crosschip
			 * links.
			 */
			if (v->pvid)
				continue;

			/* Search for the VLAN on our local port */
			list_for_each_entry(w, &priv->bridge_vlans, list) {
				if (w->port == c->port && w->vid == v->vid) {
					we_have_it = true;
					break;
				}
			}

			if (!we_have_it)
				continue;

			/* Deduplicate against tuples already collected */
			list_for_each_entry(tmp, &crosschip_vlans, list) {
				if (tmp->vid == v->vid &&
				    tmp->untagged == v->untagged &&
				    tmp->port == c->port &&
				    tmp->other_port == v->port &&
				    tmp->other_ctx == c->other_ctx) {
					already_added = true;
					break;
				}
			}

			if (already_added)
				continue;

			tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
			if (!tmp) {
				dev_err(priv->ds->dev, "Failed to allocate memory\n");
				rc = -ENOMEM;
				goto out;
			}
			tmp->vid = v->vid;
			tmp->port = c->port;
			tmp->other_port = v->port;
			tmp->other_ctx = c->other_ctx;
			tmp->untagged = v->untagged;
			list_add(&tmp->list, &crosschip_vlans);
		}
	}

	/* Second pass: program the collected VLANs and retagging rules */
	list_for_each_entry(tmp, &crosschip_vlans, list) {
		struct sja1105_private *other_priv = tmp->other_ctx->ds->priv;
		int upstream = dsa_upstream_port(priv->ds, tmp->port);
		int match, subvlan;
		u16 rx_vid;

		subvlan = sja1105_find_committed_subvlan(other_priv,
							 tmp->other_port,
							 tmp->vid);
		/* If this happens, it's a bug. The neighbour switch does not
		 * have a subvlan for tmp->vid on tmp->other_port, but it
		 * should, since we already checked for its vlan_state.
		 */
		if (WARN_ON(subvlan < 0)) {
			rc = -EINVAL;
			goto out;
		}

		rx_vid = dsa_8021q_rx_vid_subvlan(tmp->other_ctx->ds,
						  tmp->other_port,
						  subvlan);

		/* The @rx_vid retagged from @tmp->vid on
		 * {@tmp->other_ds, @tmp->other_port} needs to be
		 * re-retagged to @tmp->vid on the way back to us.
		 *
		 * Assume the original @tmp->vid is already configured
		 * on this local switch, otherwise we wouldn't be
		 * retagging its subvlan on the other switch in the
		 * first place. We just need to add a reverse retagging
		 * rule for @rx_vid and install @rx_vid on our ports.
		 */
		match = rx_vid;
		new_vlan[match].vlanid = rx_vid;
		new_vlan[match].vmemb_port |= BIT(tmp->port);
		new_vlan[match].vmemb_port |= BIT(upstream);
		/* The "untagged" flag is set the same as for the
		 * original VLAN. And towards the CPU, it doesn't
		 * really matter, because @rx_vid will only receive
		 * traffic on that port. For consistency with other dsa_8021q
		 * VLANs, we'll keep the CPU port tagged.
		 */
		if (!tmp->untagged)
			new_vlan[match].tag_port |= BIT(tmp->port);
		new_vlan[match].tag_port |= BIT(upstream);
		/* Deny egress of @rx_vid towards our front-panel port.
		 * This will force the switch to drop it, and we'll see
		 * only the re-retagged packets (having the original,
		 * pre-initial-retagging, VLAN @tmp->vid).
		 */
		new_vlan[match].vlan_bc &= ~BIT(tmp->port);

		/* On reverse retagging, the same ingress VLAN goes to multiple
		 * ports. So we have an opportunity to create composite rules
		 * to not waste the limited space in the retagging table.
		 */
		k = sja1105_find_retagging_entry(new_retagging, *num_retagging,
						 upstream, rx_vid, tmp->vid);
		if (k < 0) {
			if (*num_retagging == SJA1105_MAX_RETAGGING_COUNT) {
				dev_err(priv->ds->dev, "No more retagging rules\n");
				rc = -ENOSPC;
				goto out;
			}
			k = (*num_retagging)++;
		}
		/* And the retagging itself */
		new_retagging[k].vlan_ing = rx_vid;
		new_retagging[k].vlan_egr = tmp->vid;
		new_retagging[k].ing_port = BIT(upstream);
		new_retagging[k].egr_port |= BIT(tmp->port);
	}

out:
	/* Free the temporary list on both the success and error paths */
	list_for_each_entry_safe(tmp, pos, &crosschip_vlans, list) {
		list_del(&tmp->list);
		kfree(tmp);
	}

	return rc;
}
2501
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002502static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify);
2503
/* Ask every switch we have a cross-chip link to, to rebuild its VLAN table.
 * Called after our own VLAN table changed, since our dsa_8021q VLANs affect
 * the retagging setup of the other switches in the tree. The neighbors are
 * rebuilt with notify=false so they do not recurse back into us.
 */
static int sja1105_notify_crosschip_switches(struct sja1105_private *priv)
{
	struct sja1105_crosschip_switch *s, *pos;
	struct list_head crosschip_switches;
	struct dsa_8021q_crosschip_link *c;
	int rc = 0;

	INIT_LIST_HEAD(&crosschip_switches);

	/* Several crosschip links may point to the same switch, so first
	 * build a deduplicated list of the distant switch contexts.
	 */
	list_for_each_entry(c, &priv->dsa_8021q_ctx->crosschip_links, list) {
		bool already_added = false;

		list_for_each_entry(s, &crosschip_switches, list) {
			if (s->other_ctx == c->other_ctx) {
				already_added = true;
				break;
			}
		}

		if (already_added)
			continue;

		s = kzalloc(sizeof(*s), GFP_KERNEL);
		if (!s) {
			dev_err(priv->ds->dev, "Failed to allocate memory\n");
			rc = -ENOMEM;
			goto out;
		}
		s->other_ctx = c->other_ctx;
		list_add(&s->list, &crosschip_switches);
	}

	/* Now notify each distant switch exactly once */
	list_for_each_entry(s, &crosschip_switches, list) {
		struct sja1105_private *other_priv = s->other_ctx->ds->priv;

		rc = sja1105_build_vlan_table(other_priv, false);
		if (rc)
			goto out;
	}

out:
	/* Free the temporary deduplicated list on both success and error */
	list_for_each_entry_safe(s, pos, &crosschip_switches, list) {
		list_del(&s->list);
		kfree(s);
	}

	return rc;
}
2552
2553static int sja1105_build_vlan_table(struct sja1105_private *priv, bool notify)
2554{
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002555 u16 subvlan_map[SJA1105_NUM_PORTS][DSA_8021Q_N_SUBVLAN];
2556 struct sja1105_retagging_entry *new_retagging;
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002557 struct sja1105_vlan_lookup_entry *new_vlan;
2558 struct sja1105_table *table;
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002559 int i, num_retagging = 0;
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002560 int rc;
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002561
2562 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2563 new_vlan = kcalloc(VLAN_N_VID,
2564 table->ops->unpacked_entry_size, GFP_KERNEL);
2565 if (!new_vlan)
2566 return -ENOMEM;
2567
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002568 table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];
2569 new_retagging = kcalloc(SJA1105_MAX_RETAGGING_COUNT,
2570 table->ops->unpacked_entry_size, GFP_KERNEL);
2571 if (!new_retagging) {
2572 kfree(new_vlan);
2573 return -ENOMEM;
2574 }
2575
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002576 for (i = 0; i < VLAN_N_VID; i++)
2577 new_vlan[i].vlanid = VLAN_N_VID;
2578
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002579 for (i = 0; i < SJA1105_MAX_RETAGGING_COUNT; i++)
2580 new_retagging[i].vlan_ing = VLAN_N_VID;
2581
2582 for (i = 0; i < priv->ds->num_ports; i++)
2583 sja1105_init_subvlan_map(subvlan_map[i]);
2584
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002585 /* Bridge VLANs */
2586 rc = sja1105_build_bridge_vlans(priv, new_vlan);
2587 if (rc)
2588 goto out;
2589
2590 /* VLANs necessary for dsa_8021q operation, given to us by tag_8021q.c:
2591 * - RX VLANs
2592 * - TX VLANs
2593 * - Crosschip links
2594 */
2595 rc = sja1105_build_dsa_8021q_vlans(priv, new_vlan);
2596 if (rc)
2597 goto out;
2598
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002599 /* Private VLANs necessary for dsa_8021q operation, which we need to
2600 * determine on our own:
2601 * - Sub-VLANs
2602 * - Sub-VLANs of crosschip switches
2603 */
2604 rc = sja1105_build_subvlans(priv, subvlan_map, new_vlan, new_retagging,
2605 &num_retagging);
2606 if (rc)
2607 goto out;
2608
2609 rc = sja1105_build_crosschip_subvlans(priv, new_vlan, new_retagging,
2610 &num_retagging);
2611 if (rc)
2612 goto out;
2613
2614 rc = sja1105_commit_vlans(priv, new_vlan, new_retagging, num_retagging);
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002615 if (rc)
2616 goto out;
2617
2618 rc = sja1105_commit_pvid(priv);
2619 if (rc)
2620 goto out;
2621
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002622 for (i = 0; i < priv->ds->num_ports; i++)
2623 sja1105_commit_subvlan_map(priv, i, subvlan_map[i]);
2624
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002625 if (notify) {
2626 rc = sja1105_notify_crosschip_switches(priv);
2627 if (rc)
2628 goto out;
2629 }
2630
2631out:
2632 kfree(new_vlan);
Vladimir Oltean3f01c912020-05-12 20:20:38 +03002633 kfree(new_retagging);
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002634
2635 return rc;
2636}
2637
Vladimir Oltean070ca3b2019-06-08 15:04:30 +03002638/* The TPID setting belongs to the General Parameters table,
2639 * which can only be partially reconfigured at runtime (and not the TPID).
2640 * So a switch reset is required.
2641 */
Vladimir Oltean89153ed2021-02-13 22:43:19 +02002642int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled,
2643 struct netlink_ext_ack *extack)
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002644{
Vladimir Oltean6d7c7d92019-08-05 01:38:44 +03002645 struct sja1105_l2_lookup_params_entry *l2_lookup_params;
Vladimir Oltean070ca3b2019-06-08 15:04:30 +03002646 struct sja1105_general_params_entry *general_params;
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002647 struct sja1105_private *priv = ds->priv;
Vladimir Oltean7f149372020-05-12 20:20:27 +03002648 enum sja1105_vlan_state state;
Vladimir Oltean070ca3b2019-06-08 15:04:30 +03002649 struct sja1105_table *table;
Vladimir Olteandfacc5a2020-05-05 22:20:55 +03002650 struct sja1105_rule *rule;
Vladimir Oltean2cafa722020-05-12 20:20:35 +03002651 bool want_tagging;
Vladimir Oltean070ca3b2019-06-08 15:04:30 +03002652 u16 tpid, tpid2;
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002653 int rc;
2654
Vladimir Olteanbae33f22021-01-09 02:01:50 +02002655 list_for_each_entry(rule, &priv->flow_block.rules, list) {
2656 if (rule->type == SJA1105_RULE_VL) {
Vladimir Oltean89153ed2021-02-13 22:43:19 +02002657 NL_SET_ERR_MSG_MOD(extack,
2658 "Cannot change VLAN filtering with active VL rules");
Vladimir Olteanbae33f22021-01-09 02:01:50 +02002659 return -EBUSY;
Vladimir Olteandfacc5a2020-05-05 22:20:55 +03002660 }
2661 }
2662
Vladimir Oltean070ca3b2019-06-08 15:04:30 +03002663 if (enabled) {
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002664 /* Enable VLAN filtering. */
Vladimir Oltean54fa49e2019-12-27 03:11:13 +02002665 tpid = ETH_P_8021Q;
2666 tpid2 = ETH_P_8021AD;
Vladimir Oltean070ca3b2019-06-08 15:04:30 +03002667 } else {
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002668 /* Disable VLAN filtering. */
Vladimir Oltean070ca3b2019-06-08 15:04:30 +03002669 tpid = ETH_P_SJA1105;
2670 tpid2 = ETH_P_SJA1105;
2671 }
2672
Vladimir Oltean38b5bee2020-05-12 20:20:32 +03002673 for (port = 0; port < ds->num_ports; port++) {
2674 struct sja1105_port *sp = &priv->ports[port];
2675
2676 if (enabled)
2677 sp->xmit_tpid = priv->info->qinq_tpid;
2678 else
2679 sp->xmit_tpid = ETH_P_SJA1105;
2680 }
2681
Vladimir Oltean7f149372020-05-12 20:20:27 +03002682 if (!enabled)
2683 state = SJA1105_VLAN_UNAWARE;
Vladimir Oltean2cafa722020-05-12 20:20:35 +03002684 else if (priv->best_effort_vlan_filtering)
2685 state = SJA1105_VLAN_BEST_EFFORT;
Vladimir Oltean7f149372020-05-12 20:20:27 +03002686 else
2687 state = SJA1105_VLAN_FILTERING_FULL;
2688
Vladimir Olteancfa36b12020-05-12 20:20:31 +03002689 if (priv->vlan_state == state)
2690 return 0;
2691
Vladimir Oltean7f149372020-05-12 20:20:27 +03002692 priv->vlan_state = state;
Vladimir Oltean2cafa722020-05-12 20:20:35 +03002693 want_tagging = (state == SJA1105_VLAN_UNAWARE ||
2694 state == SJA1105_VLAN_BEST_EFFORT);
Vladimir Oltean7f149372020-05-12 20:20:27 +03002695
Vladimir Oltean070ca3b2019-06-08 15:04:30 +03002696 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
2697 general_params = table->entries;
Vladimir Olteanf9a1a762019-06-08 15:04:31 +03002698 /* EtherType used to identify inner tagged (C-tag) VLAN traffic */
Vladimir Oltean54fa49e2019-12-27 03:11:13 +02002699 general_params->tpid = tpid;
2700 /* EtherType used to identify outer tagged (S-tag) VLAN traffic */
Vladimir Oltean070ca3b2019-06-08 15:04:30 +03002701 general_params->tpid2 = tpid2;
Vladimir Oltean42824462019-06-08 15:04:32 +03002702 /* When VLAN filtering is on, we need to at least be able to
2703 * decode management traffic through the "backup plan".
2704 */
2705 general_params->incl_srcpt1 = enabled;
2706 general_params->incl_srcpt0 = enabled;
Vladimir Oltean070ca3b2019-06-08 15:04:30 +03002707
Vladimir Oltean2cafa722020-05-12 20:20:35 +03002708 want_tagging = priv->best_effort_vlan_filtering || !enabled;
2709
Vladimir Oltean6d7c7d92019-08-05 01:38:44 +03002710 /* VLAN filtering => independent VLAN learning.
Vladimir Oltean2cafa722020-05-12 20:20:35 +03002711 * No VLAN filtering (or best effort) => shared VLAN learning.
Vladimir Oltean6d7c7d92019-08-05 01:38:44 +03002712 *
2713 * In shared VLAN learning mode, untagged traffic still gets
2714 * pvid-tagged, and the FDB table gets populated with entries
2715 * containing the "real" (pvid or from VLAN tag) VLAN ID.
2716 * However the switch performs a masked L2 lookup in the FDB,
2717 * effectively only looking up a frame's DMAC (and not VID) for the
2718 * forwarding decision.
2719 *
2720 * This is extremely convenient for us, because in modes with
2721 * vlan_filtering=0, dsa_8021q actually installs unique pvid's into
2722 * each front panel port. This is good for identification but breaks
2723 * learning badly - the VID of the learnt FDB entry is unique, aka
2724 * no frames coming from any other port are going to have it. So
2725 * for forwarding purposes, this is as though learning was broken
2726 * (all frames get flooded).
2727 */
2728 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
2729 l2_lookup_params = table->entries;
Vladimir Oltean2cafa722020-05-12 20:20:35 +03002730 l2_lookup_params->shared_learn = want_tagging;
Vladimir Oltean6d7c7d92019-08-05 01:38:44 +03002731
Vladimir Olteanaaa270c2020-05-12 20:20:37 +03002732 sja1105_frame_memory_partitioning(priv);
2733
Vladimir Olteanaef31712020-05-27 20:20:38 +03002734 rc = sja1105_build_vlan_table(priv, false);
2735 if (rc)
2736 return rc;
2737
Vladimir Oltean2eea1fa2019-11-12 23:22:00 +02002738 rc = sja1105_static_config_reload(priv, SJA1105_VLAN_FILTERING);
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002739 if (rc)
Vladimir Oltean89153ed2021-02-13 22:43:19 +02002740 NL_SET_ERR_MSG_MOD(extack, "Failed to change VLAN Ethertype");
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002741
Vladimir Oltean227d07a2019-05-05 13:19:27 +03002742 /* Switch port identification based on 802.1Q is only passable
2743 * if we are not under a vlan_filtering bridge. So make sure
Vladimir Oltean2cafa722020-05-12 20:20:35 +03002744 * the two configurations are mutually exclusive (of course, the
2745 * user may know better, i.e. best_effort_vlan_filtering).
Vladimir Oltean227d07a2019-05-05 13:19:27 +03002746 */
Vladimir Oltean2cafa722020-05-12 20:20:35 +03002747 return sja1105_setup_8021q_tagging(ds, want_tagging);
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002748}
2749
Vladimir Oltean5899ee32020-09-10 19:48:56 +03002750/* Returns number of VLANs added (0 or 1) on success,
2751 * or a negative error code.
2752 */
2753static int sja1105_vlan_add_one(struct dsa_switch *ds, int port, u16 vid,
2754 u16 flags, struct list_head *vlan_list)
2755{
2756 bool untagged = flags & BRIDGE_VLAN_INFO_UNTAGGED;
2757 bool pvid = flags & BRIDGE_VLAN_INFO_PVID;
2758 struct sja1105_bridge_vlan *v;
2759
2760 list_for_each_entry(v, vlan_list, list)
2761 if (v->port == port && v->vid == vid &&
2762 v->untagged == untagged && v->pvid == pvid)
2763 /* Already added */
2764 return 0;
2765
2766 v = kzalloc(sizeof(*v), GFP_KERNEL);
2767 if (!v) {
2768 dev_err(ds->dev, "Out of memory while storing VLAN\n");
2769 return -ENOMEM;
2770 }
2771
2772 v->port = port;
2773 v->vid = vid;
2774 v->untagged = untagged;
2775 v->pvid = pvid;
2776 list_add(&v->list, vlan_list);
2777
2778 return 1;
2779}
2780
2781/* Returns number of VLANs deleted (0 or 1) */
2782static int sja1105_vlan_del_one(struct dsa_switch *ds, int port, u16 vid,
2783 struct list_head *vlan_list)
2784{
2785 struct sja1105_bridge_vlan *v, *n;
2786
2787 list_for_each_entry_safe(v, n, vlan_list, list) {
2788 if (v->port == port && v->vid == vid) {
2789 list_del(&v->list);
2790 kfree(v);
2791 return 1;
2792 }
2793 }
2794
2795 return 0;
2796}
2797
Vladimir Oltean1958d582021-01-09 02:01:53 +02002798static int sja1105_vlan_add(struct dsa_switch *ds, int port,
Vladimir Oltean31046a52021-02-13 22:43:18 +02002799 const struct switchdev_obj_port_vlan *vlan,
2800 struct netlink_ext_ack *extack)
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002801{
2802 struct sja1105_private *priv = ds->priv;
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002803 bool vlan_table_changed = false;
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002804 int rc;
2805
Vladimir Oltean1958d582021-01-09 02:01:53 +02002806 /* If the user wants best-effort VLAN filtering (aka vlan_filtering
2807 * bridge plus tagging), be sure to at least deny alterations to the
2808 * configuration done by dsa_8021q.
2809 */
2810 if (priv->vlan_state != SJA1105_VLAN_FILTERING_FULL &&
2811 vid_is_dsa_8021q(vlan->vid)) {
Vladimir Oltean31046a52021-02-13 22:43:18 +02002812 NL_SET_ERR_MSG_MOD(extack,
2813 "Range 1024-3071 reserved for dsa_8021q operation");
Vladimir Oltean1958d582021-01-09 02:01:53 +02002814 return -EBUSY;
2815 }
2816
Vladimir Olteanb7a9e0d2021-01-09 02:01:46 +02002817 rc = sja1105_vlan_add_one(ds, port, vlan->vid, vlan->flags,
2818 &priv->bridge_vlans);
2819 if (rc < 0)
Vladimir Oltean1958d582021-01-09 02:01:53 +02002820 return rc;
Vladimir Olteanb7a9e0d2021-01-09 02:01:46 +02002821 if (rc > 0)
2822 vlan_table_changed = true;
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002823
2824 if (!vlan_table_changed)
Vladimir Oltean1958d582021-01-09 02:01:53 +02002825 return 0;
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002826
Vladimir Oltean1958d582021-01-09 02:01:53 +02002827 return sja1105_build_vlan_table(priv, true);
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002828}
2829
2830static int sja1105_vlan_del(struct dsa_switch *ds, int port,
2831 const struct switchdev_obj_port_vlan *vlan)
2832{
2833 struct sja1105_private *priv = ds->priv;
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002834 bool vlan_table_changed = false;
Vladimir Oltean5899ee32020-09-10 19:48:56 +03002835 int rc;
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002836
Vladimir Olteanb7a9e0d2021-01-09 02:01:46 +02002837 rc = sja1105_vlan_del_one(ds, port, vlan->vid, &priv->bridge_vlans);
2838 if (rc > 0)
2839 vlan_table_changed = true;
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002840
2841 if (!vlan_table_changed)
2842 return 0;
2843
2844 return sja1105_build_vlan_table(priv, true);
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002845}
2846
Vladimir Oltean5899ee32020-09-10 19:48:56 +03002847static int sja1105_dsa_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid,
2848 u16 flags)
2849{
2850 struct sja1105_private *priv = ds->priv;
2851 int rc;
2852
2853 rc = sja1105_vlan_add_one(ds, port, vid, flags, &priv->dsa_8021q_vlans);
2854 if (rc <= 0)
2855 return rc;
2856
2857 return sja1105_build_vlan_table(priv, true);
2858}
2859
2860static int sja1105_dsa_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid)
2861{
2862 struct sja1105_private *priv = ds->priv;
2863 int rc;
2864
2865 rc = sja1105_vlan_del_one(ds, port, vid, &priv->dsa_8021q_vlans);
2866 if (!rc)
2867 return 0;
2868
2869 return sja1105_build_vlan_table(priv, true);
2870}
2871
/* Callbacks through which tag_8021q.c asks us to commit its VLANs */
static const struct dsa_8021q_ops sja1105_dsa_8021q_ops = {
	.vlan_add = sja1105_dsa_8021q_vlan_add,
	.vlan_del = sja1105_dsa_8021q_vlan_del,
};
2876
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03002877/* The programming model for the SJA1105 switch is "all-at-once" via static
2878 * configuration tables. Some of these can be dynamically modified at runtime,
2879 * but not the xMII mode parameters table.
2880 * Furthermode, some PHYs may not have crystals for generating their clocks
2881 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
2882 * ref_clk pin. So port clocking needs to be initialized early, before
2883 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
2884 * Setting correct PHY link speed does not matter now.
2885 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
2886 * bindings are not yet parsed by DSA core. We need to parse early so that we
2887 * can populate the xMII mode parameters table.
2888 */
2889static int sja1105_setup(struct dsa_switch *ds)
2890{
2891 struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
2892 struct sja1105_private *priv = ds->priv;
2893 int rc;
2894
2895 rc = sja1105_parse_dt(priv, ports);
2896 if (rc < 0) {
2897 dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
2898 return rc;
2899 }
Vladimir Olteanf5b86312019-05-02 23:23:32 +03002900
2901 /* Error out early if internal delays are required through DT
2902 * and we can't apply them.
2903 */
2904 rc = sja1105_parse_rgmii_delays(priv, ports);
2905 if (rc < 0) {
2906 dev_err(ds->dev, "RGMII delay not supported\n");
2907 return rc;
2908 }
2909
Vladimir Oltean61c77122019-10-12 02:18:14 +03002910 rc = sja1105_ptp_clock_register(ds);
Vladimir Olteanbb77f362019-06-08 15:04:34 +03002911 if (rc < 0) {
2912 dev_err(ds->dev, "Failed to register PTP clock: %d\n", rc);
2913 return rc;
2914 }
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03002915 /* Create and send configuration down to device */
2916 rc = sja1105_static_config_load(priv, ports);
2917 if (rc < 0) {
2918 dev_err(ds->dev, "Failed to load static config: %d\n", rc);
2919 return rc;
2920 }
2921 /* Configure the CGU (PHY link modes and speeds) */
2922 rc = sja1105_clocking_setup(priv);
2923 if (rc < 0) {
2924 dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
2925 return rc;
2926 }
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03002927 /* On SJA1105, VLAN filtering per se is always enabled in hardware.
2928 * The only thing we can do to disable it is lie about what the 802.1Q
2929 * EtherType is.
2930 * So it will still try to apply VLAN filtering, but all ingress
2931 * traffic (except frames received with EtherType of ETH_P_SJA1105)
2932 * will be internally tagged with a distorted VLAN header where the
2933 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
2934 */
2935 ds->vlan_filtering_is_global = true;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03002936
Vladimir Oltean5f06c632019-09-15 05:00:01 +03002937 /* Advertise the 8 egress queues */
2938 ds->num_tx_queues = SJA1105_NUM_TC;
2939
Vladimir Olteanc279c722020-03-27 21:55:45 +02002940 ds->mtu_enforcement_ingress = true;
2941
Vladimir Oltean0a7bdbc2020-09-26 02:04:19 +03002942 rc = sja1105_devlink_setup(ds);
Vladimir Oltean2cafa722020-05-12 20:20:35 +03002943 if (rc < 0)
2944 return rc;
2945
Vladimir Oltean227d07a2019-05-05 13:19:27 +03002946 /* The DSA/switchdev model brings up switch ports in standalone mode by
2947 * default, and that means vlan_filtering is 0 since they're not under
2948 * a bridge, so it's safe to set up switch tagging at this time.
2949 */
Vladimir Olteanbbed0bb2020-09-21 03:10:30 +03002950 rtnl_lock();
2951 rc = sja1105_setup_8021q_tagging(ds, true);
2952 rtnl_unlock();
2953
2954 return rc;
Vladimir Oltean227d07a2019-05-05 13:19:27 +03002955}
2956
Vladimir Olteanf3097be2019-06-08 15:04:42 +03002957static void sja1105_teardown(struct dsa_switch *ds)
2958{
2959 struct sja1105_private *priv = ds->priv;
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002960 struct sja1105_bridge_vlan *v, *n;
Vladimir Olteana68578c22020-01-04 02:37:10 +02002961 int port;
2962
2963 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
2964 struct sja1105_port *sp = &priv->ports[port];
2965
2966 if (!dsa_is_user_port(ds, port))
2967 continue;
2968
Vladimir Oltean52c0d4e2020-02-29 22:30:07 +02002969 if (sp->xmit_worker)
2970 kthread_destroy_worker(sp->xmit_worker);
Vladimir Olteana68578c22020-01-04 02:37:10 +02002971 }
Vladimir Olteanf3097be2019-06-08 15:04:42 +03002972
Vladimir Oltean0a7bdbc2020-09-26 02:04:19 +03002973 sja1105_devlink_teardown(ds);
Vladimir Olteana6af7762020-03-29 14:52:02 +03002974 sja1105_flower_teardown(ds);
Vladimir Oltean317ab5b2019-09-15 05:00:02 +03002975 sja1105_tas_teardown(ds);
Vladimir Oltean61c77122019-10-12 02:18:14 +03002976 sja1105_ptp_clock_unregister(ds);
Vladimir Oltean6cb0abb2019-08-05 01:38:46 +03002977 sja1105_static_config_free(&priv->static_config);
Vladimir Olteanec5ae612020-05-12 20:20:29 +03002978
2979 list_for_each_entry_safe(v, n, &priv->dsa_8021q_vlans, list) {
2980 list_del(&v->list);
2981 kfree(v);
2982 }
2983
2984 list_for_each_entry_safe(v, n, &priv->bridge_vlans, list) {
2985 list_del(&v->list);
2986 kfree(v);
2987 }
Vladimir Olteanf3097be2019-06-08 15:04:42 +03002988}
2989
Vladimir Olteane9bf9692019-08-25 22:46:30 +03002990static int sja1105_port_enable(struct dsa_switch *ds, int port,
2991 struct phy_device *phy)
2992{
2993 struct net_device *slave;
2994
2995 if (!dsa_is_user_port(ds, port))
2996 return 0;
2997
Vivien Didelot68bb8ea2019-10-21 16:51:15 -04002998 slave = dsa_to_port(ds, port)->slave;
Vladimir Olteane9bf9692019-08-25 22:46:30 +03002999
3000 slave->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3001
3002 return 0;
3003}
3004
Vladimir Olteana68578c22020-01-04 02:37:10 +02003005static void sja1105_port_disable(struct dsa_switch *ds, int port)
3006{
3007 struct sja1105_private *priv = ds->priv;
3008 struct sja1105_port *sp = &priv->ports[port];
3009
3010 if (!dsa_is_user_port(ds, port))
3011 return;
3012
3013 kthread_cancel_work_sync(&sp->xmit_work);
3014 skb_queue_purge(&sp->xmit_queue);
3015}
3016
Vladimir Oltean227d07a2019-05-05 13:19:27 +03003017static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
Vladimir Oltean47ed9852019-06-08 15:04:35 +03003018 struct sk_buff *skb, bool takets)
Vladimir Oltean227d07a2019-05-05 13:19:27 +03003019{
3020 struct sja1105_mgmt_entry mgmt_route = {0};
3021 struct sja1105_private *priv = ds->priv;
3022 struct ethhdr *hdr;
3023 int timeout = 10;
3024 int rc;
3025
3026 hdr = eth_hdr(skb);
3027
3028 mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
3029 mgmt_route.destports = BIT(port);
3030 mgmt_route.enfport = 1;
Vladimir Oltean47ed9852019-06-08 15:04:35 +03003031 mgmt_route.tsreg = 0;
3032 mgmt_route.takets = takets;
Vladimir Oltean227d07a2019-05-05 13:19:27 +03003033
3034 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
3035 slot, &mgmt_route, true);
3036 if (rc < 0) {
3037 kfree_skb(skb);
3038 return rc;
3039 }
3040
3041 /* Transfer skb to the host port. */
Vivien Didelot68bb8ea2019-10-21 16:51:15 -04003042 dsa_enqueue_skb(skb, dsa_to_port(ds, port)->slave);
Vladimir Oltean227d07a2019-05-05 13:19:27 +03003043
3044 /* Wait until the switch has processed the frame */
3045 do {
3046 rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
3047 slot, &mgmt_route);
3048 if (rc < 0) {
3049 dev_err_ratelimited(priv->ds->dev,
3050 "failed to poll for mgmt route\n");
3051 continue;
3052 }
3053
3054 /* UM10944: The ENFPORT flag of the respective entry is
3055 * cleared when a match is found. The host can use this
3056 * flag as an acknowledgment.
3057 */
3058 cpu_relax();
3059 } while (mgmt_route.enfport && --timeout);
3060
3061 if (!timeout) {
3062 /* Clean up the management route so that a follow-up
3063 * frame may not match on it by mistake.
Vladimir Oltean2a7e7402019-06-03 00:15:33 +03003064 * This is only hardware supported on P/Q/R/S - on E/T it is
3065 * a no-op and we are silently discarding the -EOPNOTSUPP.
Vladimir Oltean227d07a2019-05-05 13:19:27 +03003066 */
3067 sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
3068 slot, &mgmt_route, false);
3069 dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
3070 }
3071
3072 return NETDEV_TX_OK;
3073}
3074
Vladimir Olteana68578c22020-01-04 02:37:10 +02003075#define work_to_port(work) \
3076 container_of((work), struct sja1105_port, xmit_work)
3077#define tagger_to_sja1105(t) \
3078 container_of((t), struct sja1105_private, tagger_data)
3079
Vladimir Oltean227d07a2019-05-05 13:19:27 +03003080/* Deferred work is unfortunately necessary because setting up the management
3081 * route cannot be done from atomit context (SPI transfer takes a sleepable
3082 * lock on the bus)
3083 */
Vladimir Olteana68578c22020-01-04 02:37:10 +02003084static void sja1105_port_deferred_xmit(struct kthread_work *work)
Vladimir Oltean227d07a2019-05-05 13:19:27 +03003085{
Vladimir Olteana68578c22020-01-04 02:37:10 +02003086 struct sja1105_port *sp = work_to_port(work);
3087 struct sja1105_tagger_data *tagger_data = sp->data;
3088 struct sja1105_private *priv = tagger_to_sja1105(tagger_data);
3089 int port = sp - priv->ports;
3090 struct sk_buff *skb;
Vladimir Oltean227d07a2019-05-05 13:19:27 +03003091
Vladimir Olteana68578c22020-01-04 02:37:10 +02003092 while ((skb = skb_dequeue(&sp->xmit_queue)) != NULL) {
3093 struct sk_buff *clone = DSA_SKB_CB(skb)->clone;
Vladimir Oltean227d07a2019-05-05 13:19:27 +03003094
Vladimir Olteana68578c22020-01-04 02:37:10 +02003095 mutex_lock(&priv->mgmt_lock);
Vladimir Oltean227d07a2019-05-05 13:19:27 +03003096
Vladimir Olteana68578c22020-01-04 02:37:10 +02003097 sja1105_mgmt_xmit(priv->ds, port, 0, skb, !!clone);
Vladimir Oltean47ed9852019-06-08 15:04:35 +03003098
Vladimir Olteana68578c22020-01-04 02:37:10 +02003099 /* The clone, if there, was made by dsa_skb_tx_timestamp */
3100 if (clone)
3101 sja1105_ptp_txtstamp_skb(priv->ds, port, clone);
Vladimir Oltean47ed9852019-06-08 15:04:35 +03003102
Vladimir Olteana68578c22020-01-04 02:37:10 +02003103 mutex_unlock(&priv->mgmt_lock);
3104 }
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03003105}
3106
Vladimir Oltean84567212019-05-02 23:23:36 +03003107/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
3108 * which cannot be reconfigured at runtime. So a switch reset is required.
3109 */
3110static int sja1105_set_ageing_time(struct dsa_switch *ds,
3111 unsigned int ageing_time)
3112{
3113 struct sja1105_l2_lookup_params_entry *l2_lookup_params;
3114 struct sja1105_private *priv = ds->priv;
3115 struct sja1105_table *table;
3116 unsigned int maxage;
3117
3118 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
3119 l2_lookup_params = table->entries;
3120
3121 maxage = SJA1105_AGEING_TIME_MS(ageing_time);
3122
3123 if (l2_lookup_params->maxage == maxage)
3124 return 0;
3125
3126 l2_lookup_params->maxage = maxage;
3127
Vladimir Oltean2eea1fa2019-11-12 23:22:00 +02003128 return sja1105_static_config_reload(priv, SJA1105_AGEING_TIME);
Vladimir Oltean84567212019-05-02 23:23:36 +03003129}
3130
Vladimir Olteanc279c722020-03-27 21:55:45 +02003131static int sja1105_change_mtu(struct dsa_switch *ds, int port, int new_mtu)
3132{
Vladimir Olteanc279c722020-03-27 21:55:45 +02003133 struct sja1105_l2_policing_entry *policing;
3134 struct sja1105_private *priv = ds->priv;
Vladimir Olteanc279c722020-03-27 21:55:45 +02003135
3136 new_mtu += VLAN_ETH_HLEN + ETH_FCS_LEN;
3137
3138 if (dsa_is_cpu_port(ds, port))
3139 new_mtu += VLAN_HLEN;
3140
3141 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3142
Vladimir Olteana7cc0812020-03-29 14:52:01 +03003143 if (policing[port].maxlen == new_mtu)
Vladimir Olteanc279c722020-03-27 21:55:45 +02003144 return 0;
3145
Vladimir Olteana7cc0812020-03-29 14:52:01 +03003146 policing[port].maxlen = new_mtu;
Vladimir Olteanc279c722020-03-27 21:55:45 +02003147
3148 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3149}
3150
3151static int sja1105_get_max_mtu(struct dsa_switch *ds, int port)
3152{
3153 return 2043 - VLAN_ETH_HLEN - ETH_FCS_LEN;
3154}
3155
Vladimir Oltean317ab5b2019-09-15 05:00:02 +03003156static int sja1105_port_setup_tc(struct dsa_switch *ds, int port,
3157 enum tc_setup_type type,
3158 void *type_data)
3159{
3160 switch (type) {
3161 case TC_SETUP_QDISC_TAPRIO:
3162 return sja1105_setup_tc_taprio(ds, port, type_data);
Vladimir Oltean4d752502020-05-28 03:27:58 +03003163 case TC_SETUP_QDISC_CBS:
3164 return sja1105_setup_tc_cbs(ds, port, type_data);
Vladimir Oltean317ab5b2019-09-15 05:00:02 +03003165 default:
3166 return -EOPNOTSUPP;
3167 }
3168}
3169
Vladimir Oltean511e6ca2019-10-04 03:33:47 +03003170/* We have a single mirror (@to) port, but can configure ingress and egress
3171 * mirroring on all other (@from) ports.
3172 * We need to allow mirroring rules only as long as the @to port is always the
3173 * same, and we need to unset the @to port from mirr_port only when there is no
3174 * mirroring rule that references it.
3175 */
3176static int sja1105_mirror_apply(struct sja1105_private *priv, int from, int to,
3177 bool ingress, bool enabled)
3178{
3179 struct sja1105_general_params_entry *general_params;
3180 struct sja1105_mac_config_entry *mac;
3181 struct sja1105_table *table;
3182 bool already_enabled;
3183 u64 new_mirr_port;
3184 int rc;
3185
3186 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
3187 general_params = table->entries;
3188
3189 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
3190
3191 already_enabled = (general_params->mirr_port != SJA1105_NUM_PORTS);
3192 if (already_enabled && enabled && general_params->mirr_port != to) {
3193 dev_err(priv->ds->dev,
3194 "Delete mirroring rules towards port %llu first\n",
3195 general_params->mirr_port);
3196 return -EBUSY;
3197 }
3198
3199 new_mirr_port = to;
3200 if (!enabled) {
3201 bool keep = false;
3202 int port;
3203
3204 /* Anybody still referencing mirr_port? */
3205 for (port = 0; port < SJA1105_NUM_PORTS; port++) {
3206 if (mac[port].ing_mirr || mac[port].egr_mirr) {
3207 keep = true;
3208 break;
3209 }
3210 }
3211 /* Unset already_enabled for next time */
3212 if (!keep)
3213 new_mirr_port = SJA1105_NUM_PORTS;
3214 }
3215 if (new_mirr_port != general_params->mirr_port) {
3216 general_params->mirr_port = new_mirr_port;
3217
3218 rc = sja1105_dynamic_config_write(priv, BLK_IDX_GENERAL_PARAMS,
3219 0, general_params, true);
3220 if (rc < 0)
3221 return rc;
3222 }
3223
3224 if (ingress)
3225 mac[from].ing_mirr = enabled;
3226 else
3227 mac[from].egr_mirr = enabled;
3228
3229 return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, from,
3230 &mac[from], true);
3231}
3232
3233static int sja1105_mirror_add(struct dsa_switch *ds, int port,
3234 struct dsa_mall_mirror_tc_entry *mirror,
3235 bool ingress)
3236{
3237 return sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3238 ingress, true);
3239}
3240
3241static void sja1105_mirror_del(struct dsa_switch *ds, int port,
3242 struct dsa_mall_mirror_tc_entry *mirror)
3243{
3244 sja1105_mirror_apply(ds->priv, port, mirror->to_local_port,
3245 mirror->ingress, false);
3246}
3247
Vladimir Olteana7cc0812020-03-29 14:52:01 +03003248static int sja1105_port_policer_add(struct dsa_switch *ds, int port,
3249 struct dsa_mall_policer_tc_entry *policer)
3250{
3251 struct sja1105_l2_policing_entry *policing;
3252 struct sja1105_private *priv = ds->priv;
3253
3254 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3255
3256 /* In hardware, every 8 microseconds the credit level is incremented by
3257 * the value of RATE bytes divided by 64, up to a maximum of SMAX
3258 * bytes.
3259 */
3260 policing[port].rate = div_u64(512 * policer->rate_bytes_per_sec,
3261 1000000);
Po Liu5f035af2020-06-29 14:54:16 +08003262 policing[port].smax = policer->burst;
Vladimir Olteana7cc0812020-03-29 14:52:01 +03003263
3264 return sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3265}
3266
3267static void sja1105_port_policer_del(struct dsa_switch *ds, int port)
3268{
3269 struct sja1105_l2_policing_entry *policing;
3270 struct sja1105_private *priv = ds->priv;
3271
3272 policing = priv->static_config.tables[BLK_IDX_L2_POLICING].entries;
3273
3274 policing[port].rate = SJA1105_RATE_MBPS(1000);
3275 policing[port].smax = 65535;
3276
3277 sja1105_static_config_reload(priv, SJA1105_BEST_EFFORT_POLICING);
3278}
3279
Vladimir Oltean4d942352021-02-12 17:16:00 +02003280static int sja1105_port_set_learning(struct sja1105_private *priv, int port,
3281 bool enabled)
3282{
3283 struct sja1105_mac_config_entry *mac;
3284 int rc;
3285
3286 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
3287
3288 mac[port].dyn_learn = !!(priv->learn_ena & BIT(port));
3289
3290 rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
3291 &mac[port], true);
3292 if (rc)
3293 return rc;
3294
3295 if (enabled)
3296 priv->learn_ena |= BIT(port);
3297 else
3298 priv->learn_ena &= ~BIT(port);
3299
3300 return 0;
3301}
3302
3303/* Common function for unicast and broadcast flood configuration.
3304 * Flooding is configured between each {ingress, egress} port pair, and since
3305 * the bridge's semantics are those of "egress flooding", it means we must
3306 * enable flooding towards this port from all ingress ports that are in the
3307 * same bridge. In practice, we just enable flooding from all possible ingress
3308 * ports regardless of whether they're in the same bridge or not, since the
3309 * reach_port configuration will not allow flooded frames to leak across
3310 * bridging domains anyway.
3311 */
3312static int sja1105_port_ucast_bcast_flood(struct sja1105_private *priv, int to,
3313 struct switchdev_brport_flags flags)
3314{
3315 struct sja1105_l2_forwarding_entry *l2_fwd;
3316 int from, rc;
3317
3318 l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;
3319
3320 for (from = 0; from < priv->ds->num_ports; from++) {
3321 if (dsa_is_unused_port(priv->ds, from))
3322 continue;
3323 if (from == to)
3324 continue;
3325
3326 /* Unicast */
3327 if (flags.mask & BR_FLOOD) {
3328 if (flags.val & BR_FLOOD)
3329 l2_fwd[from].fl_domain |= BIT(to);
3330 else
3331 l2_fwd[from].fl_domain &= ~BIT(to);
3332 }
3333 /* Broadcast */
3334 if (flags.mask & BR_BCAST_FLOOD) {
3335 if (flags.val & BR_BCAST_FLOOD)
3336 l2_fwd[from].bc_domain |= BIT(to);
3337 else
3338 l2_fwd[from].bc_domain &= ~BIT(to);
3339 }
3340
3341 rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
3342 from, &l2_fwd[from], true);
3343 if (rc < 0)
3344 return rc;
3345 }
3346
3347 return 0;
3348}
3349
3350static int sja1105_port_mcast_flood(struct sja1105_private *priv, int to,
3351 struct switchdev_brport_flags flags,
3352 struct netlink_ext_ack *extack)
3353{
3354 struct sja1105_l2_lookup_entry *l2_lookup;
3355 struct sja1105_table *table;
3356 int match;
3357
3358 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
3359 l2_lookup = table->entries;
3360
3361 for (match = 0; match < table->entry_count; match++)
3362 if (l2_lookup[match].macaddr == SJA1105_UNKNOWN_MULTICAST &&
3363 l2_lookup[match].mask_macaddr == SJA1105_UNKNOWN_MULTICAST)
3364 break;
3365
3366 if (match == table->entry_count) {
3367 NL_SET_ERR_MSG_MOD(extack,
3368 "Could not find FDB entry for unknown multicast");
3369 return -ENOSPC;
3370 }
3371
3372 if (flags.val & BR_MCAST_FLOOD)
3373 l2_lookup[match].destports |= BIT(to);
3374 else
3375 l2_lookup[match].destports &= ~BIT(to);
3376
3377 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
3378 l2_lookup[match].index,
3379 &l2_lookup[match],
3380 true);
3381}
3382
3383static int sja1105_port_pre_bridge_flags(struct dsa_switch *ds, int port,
3384 struct switchdev_brport_flags flags,
3385 struct netlink_ext_ack *extack)
3386{
3387 struct sja1105_private *priv = ds->priv;
3388
3389 if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD |
3390 BR_BCAST_FLOOD))
3391 return -EINVAL;
3392
3393 if (flags.mask & (BR_FLOOD | BR_MCAST_FLOOD) &&
3394 !priv->info->can_limit_mcast_flood) {
3395 bool multicast = !!(flags.val & BR_MCAST_FLOOD);
3396 bool unicast = !!(flags.val & BR_FLOOD);
3397
3398 if (unicast != multicast) {
3399 NL_SET_ERR_MSG_MOD(extack,
3400 "This chip cannot configure multicast flooding independently of unicast");
3401 return -EINVAL;
3402 }
3403 }
3404
3405 return 0;
3406}
3407
3408static int sja1105_port_bridge_flags(struct dsa_switch *ds, int port,
3409 struct switchdev_brport_flags flags,
3410 struct netlink_ext_ack *extack)
3411{
3412 struct sja1105_private *priv = ds->priv;
3413 int rc;
3414
3415 if (flags.mask & BR_LEARNING) {
3416 bool learn_ena = !!(flags.val & BR_LEARNING);
3417
3418 rc = sja1105_port_set_learning(priv, port, learn_ena);
3419 if (rc)
3420 return rc;
3421 }
3422
3423 if (flags.mask & (BR_FLOOD | BR_BCAST_FLOOD)) {
3424 rc = sja1105_port_ucast_bcast_flood(priv, port, flags);
3425 if (rc)
3426 return rc;
3427 }
3428
3429 /* For chips that can't offload BR_MCAST_FLOOD independently, there
3430 * is nothing to do here, we ensured the configuration is in sync by
3431 * offloading BR_FLOOD.
3432 */
3433 if (flags.mask & BR_MCAST_FLOOD && priv->info->can_limit_mcast_flood) {
3434 rc = sja1105_port_mcast_flood(priv, port, flags,
3435 extack);
3436 if (rc)
3437 return rc;
3438 }
3439
3440 return 0;
3441}
3442
/* DSA switch operations implemented by this driver. The member names are
 * the contract with the DSA core (include/net/dsa.h); keep the designated
 * initializers in the same order as the ops structure for readability.
 */
static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol	= sja1105_get_tag_protocol,
	.setup			= sja1105_setup,
	.teardown		= sja1105_teardown,
	.set_ageing_time	= sja1105_set_ageing_time,
	.port_change_mtu	= sja1105_change_mtu,
	.port_max_mtu		= sja1105_get_max_mtu,
	.phylink_validate	= sja1105_phylink_validate,
	.phylink_mac_link_state	= sja1105_mac_pcs_get_state,
	.phylink_mac_config	= sja1105_mac_config,
	.phylink_mac_link_up	= sja1105_mac_link_up,
	.phylink_mac_link_down	= sja1105_mac_link_down,
	.get_strings		= sja1105_get_strings,
	.get_ethtool_stats	= sja1105_get_ethtool_stats,
	.get_sset_count		= sja1105_get_sset_count,
	.get_ts_info		= sja1105_get_ts_info,
	.port_enable		= sja1105_port_enable,
	.port_disable		= sja1105_port_disable,
	.port_fdb_dump		= sja1105_fdb_dump,
	.port_fdb_add		= sja1105_fdb_add,
	.port_fdb_del		= sja1105_fdb_del,
	.port_bridge_join	= sja1105_bridge_join,
	.port_bridge_leave	= sja1105_bridge_leave,
	.port_pre_bridge_flags	= sja1105_port_pre_bridge_flags,
	.port_bridge_flags	= sja1105_port_bridge_flags,
	.port_stp_state_set	= sja1105_bridge_stp_state_set,
	.port_vlan_filtering	= sja1105_vlan_filtering,
	.port_vlan_add		= sja1105_vlan_add,
	.port_vlan_del		= sja1105_vlan_del,
	.port_mdb_add		= sja1105_mdb_add,
	.port_mdb_del		= sja1105_mdb_del,
	.port_hwtstamp_get	= sja1105_hwtstamp_get,
	.port_hwtstamp_set	= sja1105_hwtstamp_set,
	.port_rxtstamp		= sja1105_port_rxtstamp,
	.port_txtstamp		= sja1105_port_txtstamp,
	.port_setup_tc		= sja1105_port_setup_tc,
	.port_mirror_add	= sja1105_mirror_add,
	.port_mirror_del	= sja1105_mirror_del,
	.port_policer_add	= sja1105_port_policer_add,
	.port_policer_del	= sja1105_port_policer_del,
	.cls_flower_add		= sja1105_cls_flower_add,
	.cls_flower_del		= sja1105_cls_flower_del,
	.cls_flower_stats	= sja1105_cls_flower_stats,
	.crosschip_bridge_join	= sja1105_crosschip_bridge_join,
	.crosschip_bridge_leave	= sja1105_crosschip_bridge_leave,
	.devlink_param_get	= sja1105_devlink_param_get,
	.devlink_param_set	= sja1105_devlink_param_set,
	.devlink_info_get	= sja1105_devlink_info_get,
};
3492
Vladimir Oltean0b0e2992020-08-03 19:48:23 +03003493static const struct of_device_id sja1105_dt_ids[];
3494
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03003495static int sja1105_check_device_id(struct sja1105_private *priv)
3496{
3497 const struct sja1105_regs *regs = priv->info->regs;
3498 u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
3499 struct device *dev = &priv->spidev->dev;
Vladimir Oltean0b0e2992020-08-03 19:48:23 +03003500 const struct of_device_id *match;
Vladimir Olteandff79622019-10-01 22:18:00 +03003501 u32 device_id;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03003502 u64 part_no;
3503 int rc;
3504
Vladimir Oltean34d76e92019-11-09 13:32:22 +02003505 rc = sja1105_xfer_u32(priv, SPI_READ, regs->device_id, &device_id,
3506 NULL);
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03003507 if (rc < 0)
3508 return rc;
3509
Vladimir Oltean1bd44872019-10-01 22:18:01 +03003510 rc = sja1105_xfer_buf(priv, SPI_READ, regs->prod_id, prod_id,
3511 SJA1105_SIZE_DEVICE_ID);
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03003512 if (rc < 0)
3513 return rc;
3514
3515 sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
3516
Nathan Chancellor5978fac2020-08-21 15:25:16 -07003517 for (match = sja1105_dt_ids; match->compatible[0]; match++) {
Vladimir Oltean0b0e2992020-08-03 19:48:23 +03003518 const struct sja1105_info *info = match->data;
3519
3520 /* Is what's been probed in our match table at all? */
3521 if (info->device_id != device_id || info->part_no != part_no)
3522 continue;
3523
3524 /* But is it what's in the device tree? */
3525 if (priv->info->device_id != device_id ||
3526 priv->info->part_no != part_no) {
3527 dev_warn(dev, "Device tree specifies chip %s but found %s, please fix it!\n",
3528 priv->info->name, info->name);
3529 /* It isn't. No problem, pick that up. */
3530 priv->info = info;
3531 }
3532
3533 return 0;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03003534 }
3535
Vladimir Oltean0b0e2992020-08-03 19:48:23 +03003536 dev_err(dev, "Unexpected {device ID, part number}: 0x%x 0x%llx\n",
3537 device_id, part_no);
3538
3539 return -ENODEV;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03003540}
3541
/* SPI probe entry point: reset the chip, identify it, register the DSA
 * switch and set up the per-port deferred-xmit infrastructure used by the
 * tagger. All allocations are devres-managed; the only manual unwind
 * needed is for the kthread workers created at the end.
 */
static int sja1105_probe(struct spi_device *spi)
{
	struct sja1105_tagger_data *tagger_data;
	struct device *dev = &spi->dev;
	struct sja1105_private *priv;
	struct dsa_switch *ds;
	int rc, port;

	if (!dev->of_node) {
		dev_err(dev, "No DTS bindings for SJA1105 driver\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Configure the optional reset pin and bring up switch */
	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(priv->reset_gpio))
		dev_dbg(dev, "reset-gpios not defined, ignoring\n");
	else
		/* 1 ms reset pulse, 1 ms startup delay */
		sja1105_hw_reset(priv->reset_gpio, 1, 1);

	/* Populate our driver private structure (priv) based on
	 * the device tree node that was probed (spi)
	 */
	priv->spidev = spi;
	spi_set_drvdata(spi, priv);

	/* Configure the SPI bus */
	spi->bits_per_word = 8;
	rc = spi_setup(spi);
	if (rc < 0) {
		dev_err(dev, "Could not init SPI\n");
		return rc;
	}

	/* Tentative chip info from the compatible string; may be replaced
	 * below if the silicon disagrees with the device tree.
	 */
	priv->info = of_device_get_match_data(dev);

	/* Detect hardware device */
	rc = sja1105_check_device_id(priv);
	if (rc < 0) {
		dev_err(dev, "Device ID check failed: %d\n", rc);
		return rc;
	}

	dev_info(dev, "Probed switch chip: %s\n", priv->info->name);

	ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
	if (!ds)
		return -ENOMEM;

	ds->dev = dev;
	ds->num_ports = SJA1105_NUM_PORTS;
	ds->ops = &sja1105_switch_ops;
	ds->priv = priv;
	priv->ds = ds;

	tagger_data = &priv->tagger_data;

	mutex_init(&priv->ptp_data.lock);
	mutex_init(&priv->mgmt_lock);

	/* Context for the tag_8021q VLAN scheme used for port separation */
	priv->dsa_8021q_ctx = devm_kzalloc(dev, sizeof(*priv->dsa_8021q_ctx),
					   GFP_KERNEL);
	if (!priv->dsa_8021q_ctx)
		return -ENOMEM;

	priv->dsa_8021q_ctx->ops = &sja1105_dsa_8021q_ops;
	priv->dsa_8021q_ctx->proto = htons(ETH_P_8021Q);
	priv->dsa_8021q_ctx->ds = ds;

	INIT_LIST_HEAD(&priv->dsa_8021q_ctx->crosschip_links);
	INIT_LIST_HEAD(&priv->bridge_vlans);
	INIT_LIST_HEAD(&priv->dsa_8021q_vlans);

	sja1105_tas_setup(ds);
	sja1105_flower_setup(ds);

	rc = dsa_register_switch(priv->ds);
	if (rc)
		return rc;

	/* Shaper table for tc-cbs offload, sized per chip generation */
	if (IS_ENABLED(CONFIG_NET_SCH_CBS)) {
		priv->cbs = devm_kcalloc(dev, priv->info->num_cbs_shapers,
					 sizeof(struct sja1105_cbs_entry),
					 GFP_KERNEL);
		if (!priv->cbs)
			return -ENOMEM;
	}

	/* Connections between dsa_port and sja1105_port */
	for (port = 0; port < SJA1105_NUM_PORTS; port++) {
		struct sja1105_port *sp = &priv->ports[port];
		struct dsa_port *dp = dsa_to_port(ds, port);
		struct net_device *slave;
		int subvlan;

		if (!dsa_is_user_port(ds, port))
			continue;

		dp->priv = sp;
		sp->dp = dp;
		sp->data = tagger_data;
		slave = dp->slave;
		/* One kthread worker per user port for deferred
		 * (management-route) transmission from the tagger.
		 */
		kthread_init_work(&sp->xmit_work, sja1105_port_deferred_xmit);
		sp->xmit_worker = kthread_create_worker(0, "%s_xmit",
							slave->name);
		if (IS_ERR(sp->xmit_worker)) {
			rc = PTR_ERR(sp->xmit_worker);
			dev_err(ds->dev,
				"failed to create deferred xmit thread: %d\n",
				rc);
			goto out;
		}
		skb_queue_head_init(&sp->xmit_queue);
		sp->xmit_tpid = ETH_P_SJA1105;

		/* VLAN_N_VID marks the subvlan slot as unused */
		for (subvlan = 0; subvlan < DSA_8021Q_N_SUBVLAN; subvlan++)
			sp->subvlan_map[subvlan] = VLAN_N_VID;
	}

	return 0;
out:
	/* Unwind only the workers created before the failure */
	while (port-- > 0) {
		struct sja1105_port *sp = &priv->ports[port];

		if (!dsa_is_user_port(ds, port))
			continue;

		kthread_destroy_worker(sp->xmit_worker);
	}
	return rc;
}
3677
3678static int sja1105_remove(struct spi_device *spi)
3679{
3680 struct sja1105_private *priv = spi_get_drvdata(spi);
3681
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03003682 dsa_unregister_switch(priv->ds);
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03003683 return 0;
3684}
3685
/* Match table: one entry per supported chip variant. The .data pointer
 * (struct sja1105_info) is consumed by of_device_get_match_data() and by
 * sja1105_check_device_id(), which iterates this array up to the sentinel.
 */
static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
3696
3697static struct spi_driver sja1105_driver = {
3698 .driver = {
3699 .name = "sja1105",
3700 .owner = THIS_MODULE,
3701 .of_match_table = of_match_ptr(sja1105_dt_ids),
3702 },
3703 .probe = sja1105_probe,
3704 .remove = sja1105_remove,
3705};
3706
3707module_spi_driver(sja1105_driver);
3708
3709MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
3710MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
3711MODULE_DESCRIPTION("SJA1105 Driver");
3712MODULE_LICENSE("GPL v2");