blob: cfdefd9f190510831088e465c572f39c0387ab59 [file] [log] [blame]
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/delay.h>
9#include <linux/module.h>
10#include <linux/printk.h>
11#include <linux/spi/spi.h>
12#include <linux/errno.h>
13#include <linux/gpio/consumer.h>
Vladimir Olteanad9f2992019-05-02 23:23:38 +030014#include <linux/phylink.h>
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +030015#include <linux/of.h>
16#include <linux/of_net.h>
17#include <linux/of_mdio.h>
18#include <linux/of_device.h>
19#include <linux/netdev_features.h>
20#include <linux/netdevice.h>
21#include <linux/if_bridge.h>
22#include <linux/if_ether.h>
Vladimir Oltean227d07a2019-05-05 13:19:27 +030023#include <linux/dsa/8021q.h>
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +030024#include "sja1105.h"
25
/* Pulse the switch's external hardware reset line: assert it for
 * @pulse_len milliseconds, then wait @startup_delay milliseconds for the
 * chip to come back up before any further access.
 */
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	/* Assert reset (logical 1 = active, polarity handled by gpiod) */
	gpiod_set_value_cansleep(gpio, 1);
	/* Wait for minimum reset pulse length */
	msleep(pulse_len);
	gpiod_set_value_cansleep(gpio, 0);
	/* Wait until chip is ready after reset */
	msleep(startup_delay);
}
36
37static void
38sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
39 int from, int to, bool allow)
40{
41 if (allow) {
42 l2_fwd[from].bc_domain |= BIT(to);
43 l2_fwd[from].reach_port |= BIT(to);
44 l2_fwd[from].fl_domain |= BIT(to);
45 } else {
46 l2_fwd[from].bc_domain &= ~BIT(to);
47 l2_fwd[from].reach_port &= ~BIT(to);
48 l2_fwd[from].fl_domain &= ~BIT(to);
49 }
50}
51
/* Structure used to temporarily transport device tree
 * settings into sja1105_setup
 */
struct sja1105_dt_port {
	phy_interface_t phy_mode;	/* parsed "phy-mode" DT property */
	sja1105_mii_role_t role;	/* xMII role: XMII_MAC or XMII_PHY */
};
59
/* Populate the static MAC Configuration Table with one entry per port.
 * Every port starts from the same template; the port which is its own
 * upstream (the CPU-facing port) additionally gets learning and traffic
 * I/O enabled statically, since STP callbacks never run for it.
 * Returns 0 or -ENOMEM.
 */
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on egress.
		 * Every queue i holds top[i] - base[i] frames.
		 * Sum of top[i] - base[i] is 511 (max hardware limit).
		 */
		.top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep standard IFG of 12 bytes on egress. */
		.ifg = 0,
		/* Always put the MAC speed in automatic mode, where it can be
		 * retrieved from the PHY object through phylib and
		 * sja1105_adjust_port_config.
		 */
		.speed = SJA1105_SPEED_AUTO,
		/* No static correction for 1-step 1588 events */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Disable aging for critical TTEthernet traffic */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) to apply to untagged ingress */
		.vlanprio = 0,
		.vlanid = 0,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop traffic with other EtherType than ETH_P_IP */
		.drpnona664 = false,
		/* Don't drop double-tagged traffic */
		.drpdtag = false,
		/* Don't drop untagged traffic */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard previous MAC Configuration Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_NUM_PORTS,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	/* Override table based on phylib DT bindings */
	table->entry_count = SJA1105_NUM_PORTS;

	mac = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP doesn't get called for CPU port, so we need to
			 * set the I/O parameters statically.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}
138
139static int sja1105_init_mii_settings(struct sja1105_private *priv,
140 struct sja1105_dt_port *ports)
141{
142 struct device *dev = &priv->spidev->dev;
143 struct sja1105_xmii_params_entry *mii;
144 struct sja1105_table *table;
145 int i;
146
147 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
148
149 /* Discard previous xMII Mode Parameters Table */
150 if (table->entry_count) {
151 kfree(table->entries);
152 table->entry_count = 0;
153 }
154
155 table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
156 table->ops->unpacked_entry_size, GFP_KERNEL);
157 if (!table->entries)
158 return -ENOMEM;
159
160 /* Override table based on phylib DT bindings */
161 table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
162
163 mii = table->entries;
164
165 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
166 switch (ports[i].phy_mode) {
167 case PHY_INTERFACE_MODE_MII:
168 mii->xmii_mode[i] = XMII_MODE_MII;
169 break;
170 case PHY_INTERFACE_MODE_RMII:
171 mii->xmii_mode[i] = XMII_MODE_RMII;
172 break;
173 case PHY_INTERFACE_MODE_RGMII:
174 case PHY_INTERFACE_MODE_RGMII_ID:
175 case PHY_INTERFACE_MODE_RGMII_RXID:
176 case PHY_INTERFACE_MODE_RGMII_TXID:
177 mii->xmii_mode[i] = XMII_MODE_RGMII;
178 break;
179 default:
180 dev_err(dev, "Unsupported PHY mode %s!\n",
181 phy_modes(ports[i].phy_mode));
182 }
183
184 mii->phy_mac[i] = ports[i].role;
185 }
186 return 0;
187}
188
189static int sja1105_init_static_fdb(struct sja1105_private *priv)
190{
191 struct sja1105_table *table;
192
193 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
194
Vladimir Oltean291d1e72019-05-02 23:23:31 +0300195 /* We only populate the FDB table through dynamic
196 * L2 Address Lookup entries
197 */
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300198 if (table->entry_count) {
199 kfree(table->entries);
200 table->entry_count = 0;
201 }
202 return 0;
203}
204
/* Populate the single-entry L2 Lookup Parameters Table, which controls
 * the switch's address-learning behavior (ageing, hashing, SVL/IVL).
 * Returns 0 or -ENOMEM.
 */
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
		.poly = 0x97,
		/* This selects between Independent VLAN Learning (IVL) and
		 * Shared VLAN Learning (SVL)
		 */
		.shared_learn = false,
		/* Don't discard management traffic based on ENFPORT -
		 * we don't perform SMAC port enforcement anyway, so
		 * what we are setting here doesn't matter.
		 */
		.no_enf_hostprt = false,
		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
		 * Maybe correlate with no_linklocal_learn from bridge driver?
		 */
		.no_mgmt_learn = true,
	};

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	/* Discard a previously loaded table, if any */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}
250
/* Build the static VLAN Lookup Table containing only the initial pvid 0,
 * with all ports as (untagged) members and no forwarding restrictions.
 * Returns 0 or -ENOMEM.
 */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_vlan_lookup_entry pvid = {
		.ving_mirr = 0,
		.vegr_mirr = 0,
		.vmemb_port = 0,
		.vlan_bc = 0,
		.tag_port = 0,
		.vlanid = 0,
	};
	int i;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	/* The static VLAN table will only contain the initial pvid of 0.
	 * All other VLANs are to be configured through dynamic entries,
	 * and kept in the static configuration table as backing memory.
	 * The pvid of 0 is sufficient to pass traffic while the ports are
	 * standalone and when vlan_filtering is disabled. When filtering
	 * gets enabled, the switchdev core sets up the VLAN ID 1 and sets
	 * it as the new pvid. Actually 'pvid 1' still comes up in 'bridge
	 * vlan' even when vlan_filtering is off, but it has no effect.
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;

	/* VLAN ID 0: all DT-defined ports are members; no restrictions on
	 * forwarding; always transmit priority-tagged frames as untagged.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		pvid.vmemb_port |= BIT(i);
		pvid.vlan_bc |= BIT(i);
		pvid.tag_port &= ~BIT(i);
	}

	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
	return 0;
}
299
/* Build the L2 Forwarding Table: the first SJA1105_NUM_PORTS entries hold
 * the per-port reachability/flooding masks (each user port can only talk
 * to its upstream port initially), and the following SJA1105_NUM_TC
 * entries hold a one-to-one ingress-to-egress VLAN PCP mapping.
 * Returns 0 or -ENOMEM.
 */
static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2fwd;
	struct sja1105_table *table;
	int i, j;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];

	/* Discard a previously loaded table, if any */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;

	l2fwd = table->entries;

	/* First 5 entries define the forwarding rules */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		unsigned int upstream = dsa_upstream_port(priv->ds, i);

		for (j = 0; j < SJA1105_NUM_TC; j++)
			l2fwd[i].vlan_pmap[j] = j;

		/* The upstream port is its own upstream; nothing to open */
		if (i == upstream)
			continue;

		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
		sja1105_port_allow_traffic(l2fwd, upstream, i, true);
	}
	/* Next 8 entries define VLAN PCP mapping from ingress to egress.
	 * Create a one-to-one mapping.
	 */
	for (i = 0; i < SJA1105_NUM_TC; i++)
		for (j = 0; j < SJA1105_NUM_PORTS; j++)
			l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;

	return 0;
}
344
345static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
346{
347 struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
348 /* Disallow dynamic reconfiguration of vlan_pmap */
349 .max_dynp = 0,
350 /* Use a single memory partition for all ingress queues */
351 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
352 };
353 struct sja1105_table *table;
354
355 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
356
357 if (table->entry_count) {
358 kfree(table->entries);
359 table->entry_count = 0;
360 }
361
362 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
363 table->ops->unpacked_entry_size, GFP_KERNEL);
364 if (!table->entries)
365 return -ENOMEM;
366
367 table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
368
369 /* This table only has a single entry */
370 ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
371 default_l2fwd_params;
372
373 return 0;
374}
375
/* Populate the single-entry General Parameters Table: link-local trapping
 * filters, host/mirror/cascade port selection, and the TPIDs that keep
 * VLAN filtering disabled. Also assigns a management slot to each user
 * port as a side effect. Returns 0 or -ENOMEM.
 */
static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Disallow dynamic changing of the mirror port */
		.mirr_ptacu = 0,
		.switchid = priv->ds->index,
		/* Priority queue for link-local frames trapped to CPU */
		.hostprio = 0,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = true,
		.send_meta1 = false,
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = true,
		.send_meta0 = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port. Such traffic
		 * received on host_port itself would be dropped, except
		 * by installing a temporary 'management route'
		 */
		.host_port = dsa_upstream_port(priv->ds, 0),
		/* Same as host port */
		.mirr_port = dsa_upstream_port(priv->ds, 0),
		/* Link-local traffic received on casc_port will be forwarded
		 * to host_port without embedding the source port and device ID
		 * info in the destination MAC address (presumably because it
		 * is a cascaded port and a downstream SJA switch already did
		 * that). Default to an invalid port (to disable the feature)
		 * and overwrite this if we find any DSA (cascaded) ports.
		 */
		.casc_port = SJA1105_NUM_PORTS,
		/* No TTEthernet */
		.vllupformat = 0,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Forcefully disable VLAN filtering by telling
		 * the switch that VLAN has a different EtherType.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct sja1105_table *table;
	int i, k = 0;

	/* k assigns consecutive management slots to user ports only */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		if (dsa_is_dsa_port(priv->ds, i))
			default_general_params.casc_port = i;
		else if (dsa_is_user_port(priv->ds, i))
			priv->ports[i].mgmt_slot = k++;
	}

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	/* Discard a previously loaded table, if any */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}
450
/* NOTE(review): the rate unit appears to be 1/64 Mbps (1000 Mbps maps to
 * 64000) - confirm against the policer chapter of the datasheet.
 */
#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)

/* Initialize one L2 policing entry to a permissive default:
 * full line rate, maximum burst, standard Ethernet MTU.
 */
static inline void
sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
		      int index)
{
	/* sharindx pointing at the entry itself - presumably this means the
	 * policer is not shared with any other entry; confirm with UM10944.
	 */
	policing[index].sharindx = index;
	policing[index].smax = 65535; /* Burst size in bytes */
	policing[index].rate = SJA1105_RATE_MBPS(1000);
	policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
	policing[index].partition = 0;
}
463
464static int sja1105_init_l2_policing(struct sja1105_private *priv)
465{
466 struct sja1105_l2_policing_entry *policing;
467 struct sja1105_table *table;
468 int i, j, k;
469
470 table = &priv->static_config.tables[BLK_IDX_L2_POLICING];
471
472 /* Discard previous L2 Policing Table */
473 if (table->entry_count) {
474 kfree(table->entries);
475 table->entry_count = 0;
476 }
477
478 table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
479 table->ops->unpacked_entry_size, GFP_KERNEL);
480 if (!table->entries)
481 return -ENOMEM;
482
483 table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;
484
485 policing = table->entries;
486
487 /* k sweeps through all unicast policers (0-39).
488 * bcast sweeps through policers 40-44.
489 */
490 for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
491 int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;
492
493 for (j = 0; j < SJA1105_NUM_TC; j++, k++)
494 sja1105_setup_policer(policing, k);
495
496 /* Set up this port's policer for broadcast traffic */
497 sja1105_setup_policer(policing, bcast);
498 }
499 return 0;
500}
501
502static int sja1105_static_config_load(struct sja1105_private *priv,
503 struct sja1105_dt_port *ports)
504{
505 int rc;
506
507 sja1105_static_config_free(&priv->static_config);
508 rc = sja1105_static_config_init(&priv->static_config,
509 priv->info->static_ops,
510 priv->info->device_id);
511 if (rc)
512 return rc;
513
514 /* Build static configuration */
515 rc = sja1105_init_mac_settings(priv);
516 if (rc < 0)
517 return rc;
518 rc = sja1105_init_mii_settings(priv, ports);
519 if (rc < 0)
520 return rc;
521 rc = sja1105_init_static_fdb(priv);
522 if (rc < 0)
523 return rc;
524 rc = sja1105_init_static_vlan(priv);
525 if (rc < 0)
526 return rc;
527 rc = sja1105_init_l2_lookup_params(priv);
528 if (rc < 0)
529 return rc;
530 rc = sja1105_init_l2_forwarding(priv);
531 if (rc < 0)
532 return rc;
533 rc = sja1105_init_l2_forwarding_params(priv);
534 if (rc < 0)
535 return rc;
536 rc = sja1105_init_l2_policing(priv);
537 if (rc < 0)
538 return rc;
539 rc = sja1105_init_general_params(priv);
540 if (rc < 0)
541 return rc;
542
543 /* Send initial configuration to hardware via SPI */
544 return sja1105_static_config_upload(priv);
545}
546
Vladimir Olteanf5b86312019-05-02 23:23:32 +0300547static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
548 const struct sja1105_dt_port *ports)
549{
550 int i;
551
552 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
553 if (ports->role == XMII_MAC)
554 continue;
555
556 if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
557 ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
558 priv->rgmii_rx_delay[i] = true;
559
560 if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
561 ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
562 priv->rgmii_tx_delay[i] = true;
563
564 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
565 !priv->info->setup_rgmii_delay)
566 return -EINVAL;
567 }
568 return 0;
569}
570
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300571static int sja1105_parse_ports_node(struct sja1105_private *priv,
572 struct sja1105_dt_port *ports,
573 struct device_node *ports_node)
574{
575 struct device *dev = &priv->spidev->dev;
576 struct device_node *child;
577
578 for_each_child_of_node(ports_node, child) {
579 struct device_node *phy_node;
580 int phy_mode;
581 u32 index;
582
583 /* Get switch port number from DT */
584 if (of_property_read_u32(child, "reg", &index) < 0) {
585 dev_err(dev, "Port number not defined in device tree "
586 "(property \"reg\")\n");
587 return -ENODEV;
588 }
589
590 /* Get PHY mode from DT */
591 phy_mode = of_get_phy_mode(child);
592 if (phy_mode < 0) {
593 dev_err(dev, "Failed to read phy-mode or "
594 "phy-interface-type property for port %d\n",
595 index);
596 return -ENODEV;
597 }
598 ports[index].phy_mode = phy_mode;
599
600 phy_node = of_parse_phandle(child, "phy-handle", 0);
601 if (!phy_node) {
602 if (!of_phy_is_fixed_link(child)) {
603 dev_err(dev, "phy-handle or fixed-link "
604 "properties missing!\n");
605 return -ENODEV;
606 }
607 /* phy-handle is missing, but fixed-link isn't.
608 * So it's a fixed link. Default to PHY role.
609 */
610 ports[index].role = XMII_PHY;
611 } else {
612 /* phy-handle present => put port in MAC role */
613 ports[index].role = XMII_MAC;
614 of_node_put(phy_node);
615 }
616
617 /* The MAC/PHY role can be overridden with explicit bindings */
618 if (of_property_read_bool(child, "sja1105,role-mac"))
619 ports[index].role = XMII_MAC;
620 else if (of_property_read_bool(child, "sja1105,role-phy"))
621 ports[index].role = XMII_PHY;
622 }
623
624 return 0;
625}
626
627static int sja1105_parse_dt(struct sja1105_private *priv,
628 struct sja1105_dt_port *ports)
629{
630 struct device *dev = &priv->spidev->dev;
631 struct device_node *switch_node = dev->of_node;
632 struct device_node *ports_node;
633 int rc;
634
635 ports_node = of_get_child_by_name(switch_node, "ports");
636 if (!ports_node) {
637 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
638 return -ENODEV;
639 }
640
641 rc = sja1105_parse_ports_node(priv, ports, ports_node);
642 of_node_put(ports_node);
643
644 return rc;
645}
646
647/* Convert back and forth MAC speed from Mbps to SJA1105 encoding */
648static int sja1105_speed[] = {
649 [SJA1105_SPEED_AUTO] = 0,
650 [SJA1105_SPEED_10MBPS] = 10,
651 [SJA1105_SPEED_100MBPS] = 100,
652 [SJA1105_SPEED_1000MBPS] = 1000,
653};
654
655static sja1105_speed_t sja1105_get_speed_cfg(unsigned int speed_mbps)
656{
657 int i;
658
659 for (i = SJA1105_SPEED_AUTO; i <= SJA1105_SPEED_1000MBPS; i++)
660 if (sja1105_speed[i] == speed_mbps)
661 return i;
662 return -EINVAL;
663}
664
/* Set link speed and enable/disable traffic I/O in the MAC configuration
 * for a specific port.
 *
 * @speed_mbps: If 0, leave the speed unchanged, else adapt MAC to PHY speed.
 * @enabled: Manage Rx and Tx settings for this port. If false, overrides the
 *	     settings from the STP state, but not persistently (does not
 *	     overwrite the static MAC info for this port).
 */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps, bool enabled)
{
	struct sja1105_mac_config_entry dyn_mac;
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* NOTE(review): the "speed < 0" check assumes sja1105_speed_t is a
	 * signed type; if it is an unsigned enum, -EINVAL from
	 * sja1105_get_speed_cfg() would go undetected - confirm against the
	 * typedef in sja1105.h.
	 */
	speed = sja1105_get_speed_cfg(speed_mbps);
	if (speed_mbps && speed < 0) {
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* If requested, overwrite SJA1105_SPEED_AUTO from the static MAC
	 * configuration table, since this will be used for the clocking setup,
	 * and we no longer need to store it in the static config (already told
	 * hardware we want auto during upload phase).
	 */
	if (speed_mbps)
		mac[port].speed = speed;
	else
		mac[port].speed = SJA1105_SPEED_AUTO;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like. For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	dyn_mac = mac[port];
	/* I/O is only enabled if both the caller and the static (STP-managed)
	 * configuration agree - see the @enabled kerneldoc above.
	 */
	dyn_mac.ingress = enabled && mac[port].ingress;
	dyn_mac.egress  = enabled && mac[port].egress;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG,
					  port, &dyn_mac, true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	if (!enabled)
		return 0;

	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}
736
Vladimir Olteanaf7cd032019-05-28 20:38:17 +0300737static void sja1105_mac_config(struct dsa_switch *ds, int port,
738 unsigned int link_an_mode,
739 const struct phylink_link_state *state)
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300740{
741 struct sja1105_private *priv = ds->priv;
742
Vladimir Olteanaf7cd032019-05-28 20:38:17 +0300743 if (!state->link)
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300744 sja1105_adjust_port_config(priv, port, 0, false);
745 else
Vladimir Olteanaf7cd032019-05-28 20:38:17 +0300746 sja1105_adjust_port_config(priv, port, state->speed, true);
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300747}
748
/* phylink validate callback: restrict the advertised link modes to what
 * this port's MAC supports in its configured xMII mode.
 */
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what will
	 * be sent to the PHY for "marketing".
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* The MAC does not support pause frames, and also doesn't
	 * support half-duplex traffic modes.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	/* Gigabit is only achievable in RGMII mode */
	if (mii->xmii_mode[port] == XMII_MODE_RGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
777
/* First-generation switches have a 4-way set associative TCAM that
 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
 * For the placement of a newly learnt FDB entry, the switch selects the bin
 * based on a hash function, and the way within that bin incrementally.
 */
/* Flatten a (bin, way) pair into the linear FDB index used by the
 * dynamic configuration interface.
 */
static inline int sja1105et_fdb_index(int bin, int way)
{
	return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
788
/* Search one FDB bin for an entry matching (@addr, @vid).
 *
 * Returns the way within @bin where the match was found, or -1 if absent.
 * If @match is non-NULL, the matching entry is copied into it.
 * If @last_unused is non-NULL, it receives the way of the last unused
 * slot encountered in the bin (useful for inserting a new entry).
 */
static int sja1105_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
				       const u8 *addr, u16 vid,
				       struct sja1105_l2_lookup_entry *match,
				       int *last_unused)
{
	int way;

	for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		int index = sja1105et_fdb_index(bin, way);

		/* Skip unused entries, optionally marking them
		 * into the return value
		 */
		if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						index, &l2_lookup)) {
			if (last_unused)
				*last_unused = way;
			continue;
		}

		if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
		    l2_lookup.vlanid == vid) {
			if (match)
				*match = l2_lookup;
			return way;
		}
	}
	/* Return an invalid entry index if not found */
	return -1;
}
820
821static int sja1105_fdb_add(struct dsa_switch *ds, int port,
822 const unsigned char *addr, u16 vid)
823{
824 struct sja1105_l2_lookup_entry l2_lookup = {0};
825 struct sja1105_private *priv = ds->priv;
826 struct device *dev = ds->dev;
827 int last_unused = -1;
828 int bin, way;
829
830 bin = sja1105_fdb_hash(priv, addr, vid);
831
832 way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
833 &l2_lookup, &last_unused);
834 if (way >= 0) {
835 /* We have an FDB entry. Is our port in the destination
836 * mask? If yes, we need to do nothing. If not, we need
837 * to rewrite the entry by adding this port to it.
838 */
839 if (l2_lookup.destports & BIT(port))
840 return 0;
841 l2_lookup.destports |= BIT(port);
842 } else {
843 int index = sja1105et_fdb_index(bin, way);
844
845 /* We don't have an FDB entry. We construct a new one and
846 * try to find a place for it within the FDB table.
847 */
848 l2_lookup.macaddr = ether_addr_to_u64(addr);
849 l2_lookup.destports = BIT(port);
850 l2_lookup.vlanid = vid;
851
852 if (last_unused >= 0) {
853 way = last_unused;
854 } else {
855 /* Bin is full, need to evict somebody.
856 * Choose victim at random. If you get these messages
857 * often, you may need to consider changing the
858 * distribution function:
859 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
860 */
861 get_random_bytes(&way, sizeof(u8));
862 way %= SJA1105ET_FDB_BIN_SIZE;
863 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
864 bin, addr, way);
865 /* Evict entry */
866 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
867 index, NULL, false);
868 }
869 }
870 l2_lookup.index = sja1105et_fdb_index(bin, way);
871
872 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
873 l2_lookup.index, &l2_lookup,
874 true);
875}
876
877static int sja1105_fdb_del(struct dsa_switch *ds, int port,
878 const unsigned char *addr, u16 vid)
879{
880 struct sja1105_l2_lookup_entry l2_lookup = {0};
881 struct sja1105_private *priv = ds->priv;
882 int index, bin, way;
883 bool keep;
884
885 bin = sja1105_fdb_hash(priv, addr, vid);
886 way = sja1105_is_fdb_entry_in_bin(priv, bin, addr, vid,
887 &l2_lookup, NULL);
888 if (way < 0)
889 return 0;
890 index = sja1105et_fdb_index(bin, way);
891
892 /* We have an FDB entry. Is our port in the destination mask? If yes,
893 * we need to remove it. If the resulting port mask becomes empty, we
894 * need to completely evict the FDB entry.
895 * Otherwise we just write it back.
896 */
897 if (l2_lookup.destports & BIT(port))
898 l2_lookup.destports &= ~BIT(port);
899 if (l2_lookup.destports)
900 keep = true;
901 else
902 keep = false;
903
904 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
905 index, &l2_lookup, keep);
906}
907
908static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
909 dsa_fdb_dump_cb_t *cb, void *data)
910{
911 struct sja1105_private *priv = ds->priv;
912 struct device *dev = ds->dev;
913 int i;
914
915 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
916 struct sja1105_l2_lookup_entry l2_lookup = {0};
917 u8 macaddr[ETH_ALEN];
918 int rc;
919
920 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
921 i, &l2_lookup);
922 /* No fdb entry at i, not an issue */
923 if (rc == -EINVAL)
924 continue;
925 if (rc) {
926 dev_err(dev, "Failed to dump FDB: %d\n", rc);
927 return rc;
928 }
929
930 /* FDB dump callback is per port. This means we have to
931 * disregard a valid entry if it's not for this port, even if
932 * only to revisit it later. This is inefficient because the
933 * 1024-sized FDB table needs to be traversed 4 times through
934 * SPI during a 'bridge fdb show' command.
935 */
936 if (!(l2_lookup.destports & BIT(port)))
937 continue;
938 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
939 cb(macaddr, l2_lookup.vlanid, false, data);
940 }
941 return 0;
942}
943
/* This callback needs to be present */
static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_mdb *mdb)
{
	/* Nothing to preallocate or validate: multicast entries are
	 * installed through the FDB in sja1105_mdb_add().
	 */
	return 0;
}
950
/* DSA .port_mdb_add callback: multicast entries are stored in the same
 * L2 Lookup table as unicast ones, so delegate to the FDB add path.
 */
static void sja1105_mdb_add(struct dsa_switch *ds, int port,
			    const struct switchdev_obj_port_mdb *mdb)
{
	sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
}
956
/* DSA .port_mdb_del callback: mirror of sja1105_mdb_add(), delegating to
 * the FDB deletion path.
 */
static int sja1105_mdb_del(struct dsa_switch *ds, int port,
			   const struct switchdev_obj_port_mdb *mdb)
{
	return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
}
962
/* Add (@member == true) or remove @port to/from the L2 forwarding domain
 * of every other user port attached to the same bridge @br, updating the
 * hardware L2 Forwarding table over SPI. Returns 0 or the first SPI write
 * error.
 */
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
				 struct net_device *br, bool member)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct sja1105_private *priv = ds->priv;
	int i, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		/* Add this port to the forwarding matrix of the
		 * other ports in the same bridge, and viceversa.
		 */
		if (!dsa_is_user_port(ds, i))
			continue;
		/* For the ports already under the bridge, only one thing needs
		 * to be done, and that is to add this port to their
		 * reachability domain. So we can perform the SPI write for
		 * them immediately. However, for this port itself (the one
		 * that is new to the bridge), we need to add all other ports
		 * to its reachability domain. So we do that incrementally in
		 * this loop, and perform the SPI write only at the end, once
		 * the domain contains all other bridge ports.
		 */
		if (i == port)
			continue;
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		sja1105_port_allow_traffic(l2_fwd, i, port, member);
		sja1105_port_allow_traffic(l2_fwd, port, i, member);

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  i, &l2_fwd[i], true);
		if (rc < 0)
			return rc;
	}

	/* Finally commit this port's own (accumulated) forwarding entry */
	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
					    port, &l2_fwd[port], true);
}
1003
/* Translate a bridge STP state into the per-port MAC Configuration knobs
 * (frame admission on ingress, transmission on egress, and dynamic address
 * learning), then commit the entry to hardware over SPI.
 */
static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
					 u8 state)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_mac_config_entry *mac;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	switch (state) {
	case BR_STATE_DISABLED:
	case BR_STATE_BLOCKING:
		/* From UM10944 description of DRPDTAG (why put this there?):
		 * "Management traffic flows to the port regardless of the state
		 * of the INGRESS flag". So BPDUs are still allowed to pass.
		 * At the moment no difference between DISABLED and BLOCKING.
		 */
		mac[port].ingress = false;
		mac[port].egress = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LISTENING:
		mac[port].ingress = true;
		mac[port].egress = false;
		mac[port].dyn_learn = false;
		break;
	case BR_STATE_LEARNING:
		mac[port].ingress = true;
		mac[port].egress = false;
		mac[port].dyn_learn = true;
		break;
	case BR_STATE_FORWARDING:
		mac[port].ingress = true;
		mac[port].egress = true;
		mac[port].dyn_learn = true;
		break;
	default:
		dev_err(ds->dev, "invalid STP state: %d\n", state);
		return;
	}

	/* DSA hook is void, so the SPI write's return code cannot be
	 * propagated to the caller.
	 */
	sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
				     &mac[port], true);
}
1047
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001048static int sja1105_bridge_join(struct dsa_switch *ds, int port,
1049 struct net_device *br)
1050{
1051 return sja1105_bridge_member(ds, port, br, true);
1052}
1053
1054static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
1055 struct net_device *br)
1056{
1057 sja1105_bridge_member(ds, port, br, false);
1058}
1059
Vladimir Oltean640f7632019-05-05 13:19:28 +03001060static u8 sja1105_stp_state_get(struct sja1105_private *priv, int port)
1061{
1062 struct sja1105_mac_config_entry *mac;
1063
1064 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1065
1066 if (!mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
1067 return BR_STATE_BLOCKING;
1068 if (mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
1069 return BR_STATE_LISTENING;
1070 if (mac[port].ingress && !mac[port].egress && mac[port].dyn_learn)
1071 return BR_STATE_LEARNING;
1072 if (mac[port].ingress && mac[port].egress && mac[port].dyn_learn)
1073 return BR_STATE_FORWARDING;
Vladimir Oltean3b2c4f42019-05-08 23:32:25 +03001074 /* This is really an error condition if the MAC was in none of the STP
1075 * states above. But treating the port as disabled does nothing, which
1076 * is adequate, and it also resets the MAC to a known state later on.
1077 */
1078 return BR_STATE_DISABLED;
Vladimir Oltean640f7632019-05-05 13:19:28 +03001079}
1080
/* For situations where we need to change a setting at runtime that is only
 * available through the static configuration, resetting the switch in order
 * to upload the new static config is unavoidable. Back up the settings we
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 */
static int sja1105_static_config_reload(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry *mac;
	int speed_mbps[SJA1105_NUM_PORTS];
	/* Only the entries for non-upstream (user) ports are initialized
	 * and read back below; the upstream/CPU port is forced to a fixed
	 * forwarding-like state instead.
	 */
	u8 stp_state[SJA1105_NUM_PORTS];
	int rc, i;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Back up settings changed by sja1105_adjust_port_config and
	 * sja1105_bridge_stp_state_set and restore their defaults.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		speed_mbps[i] = sja1105_speed[mac[i].speed];
		mac[i].speed = SJA1105_SPEED_AUTO;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* CPU port: always keep traffic and learning on */
			mac[i].ingress = true;
			mac[i].egress = true;
			mac[i].dyn_learn = true;
		} else {
			stp_state[i] = sja1105_stp_state_get(priv, i);
			mac[i].ingress = false;
			mac[i].egress = false;
			mac[i].dyn_learn = false;
		}
	}

	/* Reset switch and send updated static configuration */
	rc = sja1105_static_config_upload(priv);
	if (rc < 0)
		goto out;

	/* Configure the CGU (PLLs) for MII and RMII PHYs.
	 * For these interfaces there is no dynamic configuration
	 * needed, since PLLs have same settings at all speeds.
	 */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0)
		goto out;

	/* Restore the STP state and port speed saved above */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		bool enabled = (speed_mbps[i] != 0);

		if (i != dsa_upstream_port(priv->ds, i))
			sja1105_bridge_stp_state_set(priv->ds, i, stp_state[i]);

		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i],
						enabled);
		if (rc < 0)
			goto out;
	}
out:
	return rc;
}
1141
1142/* The TPID setting belongs to the General Parameters table,
1143 * which can only be partially reconfigured at runtime (and not the TPID).
1144 * So a switch reset is required.
1145 */
1146static int sja1105_change_tpid(struct sja1105_private *priv,
1147 u16 tpid, u16 tpid2)
1148{
1149 struct sja1105_general_params_entry *general_params;
1150 struct sja1105_table *table;
1151
1152 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
1153 general_params = table->entries;
1154 general_params->tpid = tpid;
1155 general_params->tpid2 = tpid2;
1156 return sja1105_static_config_reload(priv);
1157}
1158
1159static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
1160{
1161 struct sja1105_mac_config_entry *mac;
1162
1163 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1164
1165 mac[port].vlanid = pvid;
1166
1167 return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1168 &mac[port], true);
1169}
1170
1171static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
1172{
1173 struct sja1105_vlan_lookup_entry *vlan;
1174 int count, i;
1175
1176 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
1177 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
1178
1179 for (i = 0; i < count; i++)
1180 if (vlan[i].vlanid == vid)
1181 return i;
1182
1183 /* Return an invalid entry index if not found */
1184 return -1;
1185}
1186
/* Add @port to (enabled == true) or remove it from (enabled == false) the
 * VLAN Lookup entry for @vid, creating or deleting the entry on demand.
 * @untagged controls whether frames egress this port without a VLAN tag.
 * The affected entry is committed to hardware over SPI.
 *
 * Returns 0 on success or a negative error code.
 */
static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
			      bool enabled, bool untagged)
{
	struct sja1105_vlan_lookup_entry *vlan;
	struct sja1105_table *table;
	bool keep = true;
	int match, rc;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	match = sja1105_is_vlan_configured(priv, vid);
	if (match < 0) {
		/* Can't delete a missing entry. */
		if (!enabled)
			return 0;
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;
		match = table->entry_count - 1;
	}
	/* Assign pointer after the resize (it's new memory) */
	vlan = table->entries;
	vlan[match].vlanid = vid;
	if (enabled) {
		vlan[match].vlan_bc |= BIT(port);
		vlan[match].vmemb_port |= BIT(port);
	} else {
		vlan[match].vlan_bc &= ~BIT(port);
		vlan[match].vmemb_port &= ~BIT(port);
	}
	/* Also unset tag_port if removing this VLAN was requested,
	 * just so we don't have a confusing bitmap (no practical purpose).
	 */
	if (untagged || !enabled)
		vlan[match].tag_port &= ~BIT(port);
	else
		vlan[match].tag_port |= BIT(port);
	/* If there's no port left as member of this VLAN,
	 * it's time for it to go.
	 */
	if (!vlan[match].vmemb_port)
		keep = false;

	dev_dbg(priv->ds->dev,
		"%s: port %d, vid %llu, broadcast domain 0x%llx, "
		"port members 0x%llx, tagged ports 0x%llx, keep %d\n",
		__func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
		vlan[match].vmemb_port, vlan[match].tag_port, keep);

	/* Write (keep == true) or invalidate (keep == false) the hardware
	 * entry, then mirror a deletion in the shadow static config.
	 */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
					  &vlan[match], keep);
	if (rc < 0)
		return rc;

	if (!keep)
		return sja1105_table_delete_entry(table, match);

	return 0;
}
1246
Vladimir Oltean227d07a2019-05-05 13:19:27 +03001247static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
1248{
1249 int rc, i;
1250
1251 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1252 rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
1253 if (rc < 0) {
1254 dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
1255 i, rc);
1256 return rc;
1257 }
1258 }
1259 dev_info(ds->dev, "%s switch tagging\n",
1260 enabled ? "Enabled" : "Disabled");
1261 return 0;
1262}
1263
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001264static enum dsa_tag_protocol
1265sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
1266{
Vladimir Oltean227d07a2019-05-05 13:19:27 +03001267 return DSA_TAG_PROTO_SJA1105;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001268}
1269
/* This callback needs to be present for .port_vlan_add to be invoked by
 * the switchdev core. Nothing to pre-validate or reserve, so accept.
 */
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan)
{
	return 0;
}
1276
/* DSA .port_vlan_filtering hook. Hardware VLAN filtering cannot truly be
 * turned off on this switch, so it is emulated by changing which TPIDs the
 * switch recognizes as VLAN tags (which requires a switch reset, done
 * inside sja1105_change_tpid).
 */
static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
{
	struct sja1105_private *priv = ds->priv;
	int rc;

	if (enabled)
		/* Enable VLAN filtering. */
		rc = sja1105_change_tpid(priv, ETH_P_8021Q, ETH_P_8021AD);
	else
		/* Disable VLAN filtering. */
		rc = sja1105_change_tpid(priv, ETH_P_SJA1105, ETH_P_SJA1105);
	if (rc)
		dev_err(ds->dev, "Failed to change VLAN Ethertype\n");

	/* Switch port identification based on 802.1Q is only possible
	 * if we are not under a vlan_filtering bridge. So make sure
	 * the two configurations are mutually exclusive.
	 */
	return sja1105_setup_8021q_tagging(ds, !enabled);
}
1297
/* DSA .port_vlan_add hook: add @port to each VLAN in the requested range,
 * honoring the UNTAGGED and PVID bridge flags. The hook is void in this
 * kernel version, so failures can only be logged, not propagated.
 */
static void sja1105_vlan_add(struct dsa_switch *ds, int port,
			     const struct switchdev_obj_port_vlan *vlan)
{
	struct sja1105_private *priv = ds->priv;
	u16 vid;
	int rc;

	for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
		rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
					BRIDGE_VLAN_INFO_UNTAGGED);
		if (rc < 0) {
			dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
				vid, port, rc);
			return;
		}
		if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
			rc = sja1105_pvid_apply(ds->priv, port, vid);
			if (rc < 0) {
				dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
					vid, port, rc);
				return;
			}
		}
	}
}
1323
1324static int sja1105_vlan_del(struct dsa_switch *ds, int port,
1325 const struct switchdev_obj_port_vlan *vlan)
1326{
1327 struct sja1105_private *priv = ds->priv;
1328 u16 vid;
1329 int rc;
1330
1331 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1332 rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
1333 BRIDGE_VLAN_INFO_UNTAGGED);
1334 if (rc < 0) {
1335 dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
1336 vid, port, rc);
1337 return rc;
1338 }
1339 }
1340 return 0;
1341}
1342
/* The programming model for the SJA1105 switch is "all-at-once" via static
 * configuration tables. Some of these can be dynamically modified at runtime,
 * but not the xMII mode parameters table.
 * Furthermore, some PHYs may not have crystals for generating their clocks
 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
 * ref_clk pin. So port clocking needs to be initialized early, before
 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
 * Setting correct PHY link speed does not matter now.
 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
 * bindings are not yet parsed by DSA core. We need to parse early so that we
 * can populate the xMII mode parameters table.
 */
static int sja1105_setup(struct dsa_switch *ds)
{
	struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
	struct sja1105_private *priv = ds->priv;
	int rc;

	/* Collect per-port phy-mode and MII role from the device tree */
	rc = sja1105_parse_dt(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
		return rc;
	}

	/* Error out early if internal delays are required through DT
	 * and we can't apply them.
	 */
	rc = sja1105_parse_rgmii_delays(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "RGMII delay not supported\n");
		return rc;
	}

	/* Create and send configuration down to device */
	rc = sja1105_static_config_load(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
		return rc;
	}
	/* Configure the CGU (PHY link modes and speeds) */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
		return rc;
	}
	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
	 * The only thing we can do to disable it is lie about what the 802.1Q
	 * EtherType is.
	 * So it will still try to apply VLAN filtering, but all ingress
	 * traffic (except frames received with EtherType of ETH_P_SJA1105)
	 * will be internally tagged with a distorted VLAN header where the
	 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
	 */
	ds->vlan_filtering_is_global = true;

	/* The DSA/switchdev model brings up switch ports in standalone mode by
	 * default, and that means vlan_filtering is 0 since they're not under
	 * a bridge, so it's safe to set up switch tagging at this time.
	 */
	return sja1105_setup_8021q_tagging(ds, true);
}
1404
/* Transmit @skb towards @port through management route @slot: program the
 * route (matching on the frame's DMAC) over SPI, hand the frame to the DSA
 * master interface, then poll the route until the switch consumes it.
 * Must be called with priv->mgmt_lock held (see sja1105_port_deferred_xmit).
 *
 * Always returns NETDEV_TX_OK; the skb is either consumed by
 * dsa_enqueue_skb or freed on the SPI error path.
 */
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, ds->ports[port].slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			/* continue still evaluates the loop condition, so
			 * the timeout keeps counting down on read errors
			 */
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}
1458
/* Deferred work is unfortunately necessary because setting up the management
 * route cannot be done from atomic context (SPI transfer takes a sleepable
 * lock on the bus)
 */
static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
					      struct sk_buff *skb)
{
	struct sja1105_private *priv = ds->priv;
	struct sja1105_port *sp = &priv->ports[port];
	int slot = sp->mgmt_slot;

	/* The tragic fact about the switch having 4x2 slots for installing
	 * management routes is that all of them except one are actually
	 * useless.
	 * If 2 slots are simultaneously configured for two BPDUs sent to the
	 * same (multicast) DMAC but on different egress ports, the switch
	 * would confuse them and redirect first frame it receives on the CPU
	 * port towards the port configured on the numerically first slot
	 * (therefore wrong port), then second received frame on second slot
	 * (also wrong port).
	 * So for all practical purposes, there needs to be a lock that
	 * prevents that from happening. The slot used here is utterly useless
	 * (could have simply been 0 just as fine), but we are doing it
	 * nonetheless, in case a smarter idea ever comes up in the future.
	 */
	mutex_lock(&priv->mgmt_lock);

	/* sja1105_mgmt_xmit consumes the skb on all paths */
	sja1105_mgmt_xmit(ds, port, slot, skb);

	mutex_unlock(&priv->mgmt_lock);
	return NETDEV_TX_OK;
}
1491
Vladimir Oltean84567212019-05-02 23:23:36 +03001492/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
1493 * which cannot be reconfigured at runtime. So a switch reset is required.
1494 */
1495static int sja1105_set_ageing_time(struct dsa_switch *ds,
1496 unsigned int ageing_time)
1497{
1498 struct sja1105_l2_lookup_params_entry *l2_lookup_params;
1499 struct sja1105_private *priv = ds->priv;
1500 struct sja1105_table *table;
1501 unsigned int maxage;
1502
1503 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
1504 l2_lookup_params = table->entries;
1505
1506 maxage = SJA1105_AGEING_TIME_MS(ageing_time);
1507
1508 if (l2_lookup_params->maxage == maxage)
1509 return 0;
1510
1511 l2_lookup_params->maxage = maxage;
1512
1513 return sja1105_static_config_reload(priv);
1514}
1515
/* DSA driver operations implemented by this switch. */
static const struct dsa_switch_ops sja1105_switch_ops = {
	/* Setup, tagging and link management */
	.get_tag_protocol = sja1105_get_tag_protocol,
	.setup = sja1105_setup,
	.set_ageing_time = sja1105_set_ageing_time,
	.phylink_validate = sja1105_phylink_validate,
	.phylink_mac_config = sja1105_mac_config,
	/* Ethtool statistics */
	.get_strings = sja1105_get_strings,
	.get_ethtool_stats = sja1105_get_ethtool_stats,
	.get_sset_count = sja1105_get_sset_count,
	/* FDB (unicast address table) */
	.port_fdb_dump = sja1105_fdb_dump,
	.port_fdb_add = sja1105_fdb_add,
	.port_fdb_del = sja1105_fdb_del,
	/* Bridge offload and STP */
	.port_bridge_join = sja1105_bridge_join,
	.port_bridge_leave = sja1105_bridge_leave,
	.port_stp_state_set = sja1105_bridge_stp_state_set,
	/* VLAN offload */
	.port_vlan_prepare = sja1105_vlan_prepare,
	.port_vlan_filtering = sja1105_vlan_filtering,
	.port_vlan_add = sja1105_vlan_add,
	.port_vlan_del = sja1105_vlan_del,
	/* MDB (multicast, backed by the FDB) */
	.port_mdb_prepare = sja1105_mdb_prepare,
	.port_mdb_add = sja1105_mdb_add,
	.port_mdb_del = sja1105_mdb_del,
	/* Management-route transmission from process context */
	.port_deferred_xmit = sja1105_port_deferred_xmit,
};
1540
/* Verify over SPI that the attached chip matches the one selected by the
 * device tree compatible: first the device ID register, then the part
 * number extracted from the PROD_ID register.
 *
 * Returns 0 on match, -ENODEV on mismatch, or a negative SPI error code.
 */
static int sja1105_check_device_id(struct sja1105_private *priv)
{
	const struct sja1105_regs *regs = priv->info->regs;
	u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
	struct device *dev = &priv->spidev->dev;
	u64 device_id;
	u64 part_no;
	int rc;

	rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
				  &device_id, SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	if (device_id != priv->info->device_id) {
		dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
			priv->info->device_id, device_id);
		return -ENODEV;
	}

	rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
					 prod_id, SJA1105_SIZE_DEVICE_ID);
	if (rc < 0)
		return rc;

	/* Part number is the bit field at offset 19, width ending at bit 4
	 * of the packed PROD_ID buffer
	 */
	sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);

	if (part_no != priv->info->part_no) {
		dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n",
			priv->info->part_no, part_no);
		return -ENODEV;
	}

	return 0;
}
1576
/* SPI probe: reset the hardware (if a reset GPIO is given), set up the SPI
 * bus parameters, verify the chip identity against the matched compatible,
 * then allocate and register the DSA switch.
 *
 * Returns 0 on success or a negative error code.
 */
static int sja1105_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct sja1105_private *priv;
	struct dsa_switch *ds;
	int rc, i;

	if (!dev->of_node) {
		dev_err(dev, "No DTS bindings for SJA1105 driver\n");
		return -EINVAL;
	}

	priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Configure the optional reset pin and bring up switch */
	priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	/* NOTE(review): IS_ERR() also swallows -EPROBE_DEFER from the GPIO
	 * provider here; confirm whether probe deferral should be propagated
	 * instead of being treated as "no reset pin".
	 */
	if (IS_ERR(priv->reset_gpio))
		dev_dbg(dev, "reset-gpios not defined, ignoring\n");
	else
		sja1105_hw_reset(priv->reset_gpio, 1, 1);

	/* Populate our driver private structure (priv) based on
	 * the device tree node that was probed (spi)
	 */
	priv->spidev = spi;
	spi_set_drvdata(spi, priv);

	/* Configure the SPI bus */
	spi->bits_per_word = 8;
	rc = spi_setup(spi);
	if (rc < 0) {
		dev_err(dev, "Could not init SPI\n");
		return rc;
	}

	/* Per-chip data selected by the matched compatible string */
	priv->info = of_device_get_match_data(dev);

	/* Detect hardware device */
	rc = sja1105_check_device_id(priv);
	if (rc < 0) {
		dev_err(dev, "Device ID check failed: %d\n", rc);
		return rc;
	}

	dev_info(dev, "Probed switch chip: %s\n", priv->info->name);

	ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
	if (!ds)
		return -ENOMEM;

	ds->ops = &sja1105_switch_ops;
	ds->priv = priv;
	priv->ds = ds;

	/* Connections between dsa_port and sja1105_port */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		struct sja1105_port *sp = &priv->ports[i];

		ds->ports[i].priv = sp;
		sp->dp = &ds->ports[i];
	}
	/* Serializes use of the single usable management route slot */
	mutex_init(&priv->mgmt_lock);

	return dsa_register_switch(priv->ds);
}
1644
1645static int sja1105_remove(struct spi_device *spi)
1646{
1647 struct sja1105_private *priv = spi_get_drvdata(spi);
1648
1649 dsa_unregister_switch(priv->ds);
1650 sja1105_static_config_free(&priv->static_config);
1651 return 0;
1652}
1653
/* Device tree match table; .data selects the per-chip sja1105_info
 * (register map, device/part IDs) used throughout the driver.
 */
static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
1664
1665static struct spi_driver sja1105_driver = {
1666 .driver = {
1667 .name = "sja1105",
1668 .owner = THIS_MODULE,
1669 .of_match_table = of_match_ptr(sja1105_dt_ids),
1670 },
1671 .probe = sja1105_probe,
1672 .remove = sja1105_remove,
1673};
1674
/* Standard module registration boilerplate and metadata */
module_spi_driver(sja1105_driver);

MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
MODULE_DESCRIPTION("SJA1105 Driver");
MODULE_LICENSE("GPL v2");