blob: 8343dcf483843b0d0d8a0bd9ec463b6ecb03211e [file] [log] [blame]
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001// SPDX-License-Identifier: GPL-2.0
2/* Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
3 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
4 */
5
6#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
7
8#include <linux/delay.h>
9#include <linux/module.h>
10#include <linux/printk.h>
11#include <linux/spi/spi.h>
12#include <linux/errno.h>
13#include <linux/gpio/consumer.h>
Vladimir Olteanad9f2992019-05-02 23:23:38 +030014#include <linux/phylink.h>
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +030015#include <linux/of.h>
16#include <linux/of_net.h>
17#include <linux/of_mdio.h>
18#include <linux/of_device.h>
19#include <linux/netdev_features.h>
20#include <linux/netdevice.h>
21#include <linux/if_bridge.h>
22#include <linux/if_ether.h>
Vladimir Oltean227d07a2019-05-05 13:19:27 +030023#include <linux/dsa/8021q.h>
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +030024#include "sja1105.h"
25
/* Toggle the chip's hardware reset line: assert it for @pulse_len ms,
 * then deassert and give the switch @startup_delay ms to boot.
 */
static void sja1105_hw_reset(struct gpio_desc *gpio, unsigned int pulse_len,
			     unsigned int startup_delay)
{
	/* Assert reset and hold it for the minimum required pulse width */
	gpiod_set_value_cansleep(gpio, 1);
	msleep(pulse_len);
	/* Deassert reset, then wait until the chip is ready again */
	gpiod_set_value_cansleep(gpio, 0);
	msleep(startup_delay);
}
36
37static void
38sja1105_port_allow_traffic(struct sja1105_l2_forwarding_entry *l2_fwd,
39 int from, int to, bool allow)
40{
41 if (allow) {
42 l2_fwd[from].bc_domain |= BIT(to);
43 l2_fwd[from].reach_port |= BIT(to);
44 l2_fwd[from].fl_domain |= BIT(to);
45 } else {
46 l2_fwd[from].bc_domain &= ~BIT(to);
47 l2_fwd[from].reach_port &= ~BIT(to);
48 l2_fwd[from].fl_domain &= ~BIT(to);
49 }
50}
51
/* Structure used to temporarily transport device tree
 * settings into sja1105_setup
 */
struct sja1105_dt_port {
	/* phy-mode / phy-interface-type property of the port node */
	phy_interface_t phy_mode;
	/* XMII_MAC or XMII_PHY, from phy-handle/fixed-link presence or
	 * the explicit sja1105,role-mac / sja1105,role-phy bindings
	 */
	sja1105_mii_role_t role;
};
59
/* Populate the static MAC Configuration Table: one entry per port, all
 * starting from the same template. I/O and learning are only enabled
 * statically on the upstream (CPU-facing) port; user ports get them
 * enabled later via STP. Returns 0 on success or -ENOMEM.
 */
static int sja1105_init_mac_settings(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry default_mac = {
		/* Enable all 8 priority queues on egress.
		 * Every queue i holds top[i] - base[i] frames.
		 * Sum of top[i] - base[i] is 511 (max hardware limit).
		 */
		.top = {0x3F, 0x7F, 0xBF, 0xFF, 0x13F, 0x17F, 0x1BF, 0x1FF},
		.base = {0x0, 0x40, 0x80, 0xC0, 0x100, 0x140, 0x180, 0x1C0},
		.enabled = {true, true, true, true, true, true, true, true},
		/* Keep standard IFG of 12 bytes on egress. */
		.ifg = 0,
		/* Always put the MAC speed in automatic mode, where it can be
		 * retrieved from the PHY object through phylib and
		 * sja1105_adjust_port_config.
		 */
		.speed = SJA1105_SPEED_AUTO,
		/* No static correction for 1-step 1588 events */
		.tp_delin = 0,
		.tp_delout = 0,
		/* Disable aging for critical TTEthernet traffic */
		.maxage = 0xFF,
		/* Internal VLAN (pvid) to apply to untagged ingress */
		.vlanprio = 0,
		.vlanid = 0,
		.ing_mirr = false,
		.egr_mirr = false,
		/* Don't drop traffic with other EtherType than ETH_P_IP */
		.drpnona664 = false,
		/* Don't drop double-tagged traffic */
		.drpdtag = false,
		/* Don't drop untagged traffic */
		.drpuntag = false,
		/* Don't retag 802.1p (VID 0) traffic with the pvid */
		.retag = false,
		/* Disable learning and I/O on user ports by default -
		 * STP will enable it.
		 */
		.dyn_learn = false,
		.egress = false,
		.ingress = false,
	};
	struct sja1105_mac_config_entry *mac;
	struct sja1105_table *table;
	int i;

	table = &priv->static_config.tables[BLK_IDX_MAC_CONFIG];

	/* Discard previous MAC Configuration Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_NUM_PORTS,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	/* Override table based on phylib DT bindings */
	table->entry_count = SJA1105_NUM_PORTS;

	mac = table->entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		mac[i] = default_mac;
		if (i == dsa_upstream_port(priv->ds, i)) {
			/* STP doesn't get called for CPU port, so we need to
			 * set the I/O parameters statically.
			 */
			mac[i].dyn_learn = true;
			mac[i].ingress = true;
			mac[i].egress = true;
		}
	}

	return 0;
}
138
139static int sja1105_init_mii_settings(struct sja1105_private *priv,
140 struct sja1105_dt_port *ports)
141{
142 struct device *dev = &priv->spidev->dev;
143 struct sja1105_xmii_params_entry *mii;
144 struct sja1105_table *table;
145 int i;
146
147 table = &priv->static_config.tables[BLK_IDX_XMII_PARAMS];
148
149 /* Discard previous xMII Mode Parameters Table */
150 if (table->entry_count) {
151 kfree(table->entries);
152 table->entry_count = 0;
153 }
154
155 table->entries = kcalloc(SJA1105_MAX_XMII_PARAMS_COUNT,
156 table->ops->unpacked_entry_size, GFP_KERNEL);
157 if (!table->entries)
158 return -ENOMEM;
159
160 /* Override table based on phylib DT bindings */
161 table->entry_count = SJA1105_MAX_XMII_PARAMS_COUNT;
162
163 mii = table->entries;
164
165 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
166 switch (ports[i].phy_mode) {
167 case PHY_INTERFACE_MODE_MII:
168 mii->xmii_mode[i] = XMII_MODE_MII;
169 break;
170 case PHY_INTERFACE_MODE_RMII:
171 mii->xmii_mode[i] = XMII_MODE_RMII;
172 break;
173 case PHY_INTERFACE_MODE_RGMII:
174 case PHY_INTERFACE_MODE_RGMII_ID:
175 case PHY_INTERFACE_MODE_RGMII_RXID:
176 case PHY_INTERFACE_MODE_RGMII_TXID:
177 mii->xmii_mode[i] = XMII_MODE_RGMII;
178 break;
179 default:
180 dev_err(dev, "Unsupported PHY mode %s!\n",
181 phy_modes(ports[i].phy_mode));
182 }
183
184 mii->phy_mac[i] = ports[i].role;
185 }
186 return 0;
187}
188
189static int sja1105_init_static_fdb(struct sja1105_private *priv)
190{
191 struct sja1105_table *table;
192
193 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP];
194
Vladimir Oltean291d1e72019-05-02 23:23:31 +0300195 /* We only populate the FDB table through dynamic
196 * L2 Address Lookup entries
197 */
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300198 if (table->entry_count) {
199 kfree(table->entries);
200 table->entry_count = 0;
201 }
202 return 0;
203}
204
/* Build the single-entry L2 Lookup Parameters Table, which controls FDB
 * ageing, bin sizing, hashing polynomial and learning policy.
 * Returns 0 on success or -ENOMEM.
 */
static int sja1105_init_l2_lookup_params(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_l2_lookup_params_entry default_l2_lookup_params = {
		/* Learned FDB entries are forgotten after 300 seconds */
		.maxage = SJA1105_AGEING_TIME_MS(300000),
		/* All entries within a FDB bin are available for learning */
		.dyn_tbsz = SJA1105ET_FDB_BIN_SIZE,
		/* And the P/Q/R/S equivalent setting: */
		.start_dynspc = 0,
		/* 2^8 + 2^5 + 2^3 + 2^2 + 2^1 + 1 in Koopman notation */
		.poly = 0x97,
		/* This selects between Independent VLAN Learning (IVL) and
		 * Shared VLAN Learning (SVL)
		 */
		.shared_learn = false,
		/* Don't discard management traffic based on ENFPORT -
		 * we don't perform SMAC port enforcement anyway, so
		 * what we are setting here doesn't matter.
		 */
		.no_enf_hostprt = false,
		/* Don't learn SMAC for mac_fltres1 and mac_fltres0.
		 * Maybe correlate with no_linklocal_learn from bridge driver?
		 */
		.no_mgmt_learn = true,
		/* P/Q/R/S only */
		.use_static = true,
		/* Dynamically learned FDB entries can overwrite other (older)
		 * dynamic FDB entries
		 */
		.owr_dyn = true,
		.drpnolearn = true,
	};

	table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];

	/* Discard any previous L2 Lookup Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_LOOKUP_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_l2_lookup_params_entry *)table->entries)[0] =
				default_l2_lookup_params;

	return 0;
}
259
/* Build the static VLAN Lookup Table holding only pvid 0, with every
 * port as an untagged member. Returns 0 on success or -ENOMEM.
 */
static int sja1105_init_static_vlan(struct sja1105_private *priv)
{
	struct sja1105_table *table;
	struct sja1105_vlan_lookup_entry pvid = {
		.ving_mirr = 0,
		.vegr_mirr = 0,
		.vmemb_port = 0,
		.vlan_bc = 0,
		.tag_port = 0,
		.vlanid = 0,
	};
	int i;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	/* The static VLAN table will only contain the initial pvid of 0.
	 * All other VLANs are to be configured through dynamic entries,
	 * and kept in the static configuration table as backing memory.
	 * The pvid of 0 is sufficient to pass traffic while the ports are
	 * standalone and when vlan_filtering is disabled. When filtering
	 * gets enabled, the switchdev core sets up the VLAN ID 1 and sets
	 * it as the new pvid. Actually 'pvid 1' still comes up in 'bridge
	 * vlan' even when vlan_filtering is off, but it has no effect.
	 */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(1, table->ops->unpacked_entry_size,
				 GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = 1;

	/* VLAN ID 0: all DT-defined ports are members; no restrictions on
	 * forwarding; always transmit priority-tagged frames as untagged.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		pvid.vmemb_port |= BIT(i);
		pvid.vlan_bc |= BIT(i);
		pvid.tag_port &= ~BIT(i);
	}

	((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid;
	return 0;
}
308
/* Build the L2 Forwarding Table. The first SJA1105_NUM_PORTS entries
 * carry per-port forwarding/broadcast/flood masks (each user port may
 * only exchange traffic with the upstream port); the following
 * SJA1105_NUM_TC entries carry the VLAN PCP ingress-to-egress mapping.
 * Returns 0 on success or -ENOMEM.
 */
static int sja1105_init_l2_forwarding(struct sja1105_private *priv)
{
	struct sja1105_l2_forwarding_entry *l2fwd;
	struct sja1105_table *table;
	int i, j;

	table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING];

	/* Discard any previous L2 Forwarding Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_FORWARDING_COUNT;

	l2fwd = table->entries;

	/* First 5 entries define the forwarding rules */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		unsigned int upstream = dsa_upstream_port(priv->ds, i);

		/* Identity-map each traffic class to its egress queue */
		for (j = 0; j < SJA1105_NUM_TC; j++)
			l2fwd[i].vlan_pmap[j] = j;

		/* No forwarding rule from the upstream port to itself */
		if (i == upstream)
			continue;

		/* Bidirectional traffic between this port and upstream */
		sja1105_port_allow_traffic(l2fwd, i, upstream, true);
		sja1105_port_allow_traffic(l2fwd, upstream, i, true);
	}
	/* Next 8 entries define VLAN PCP mapping from ingress to egress.
	 * Create a one-to-one mapping.
	 */
	for (i = 0; i < SJA1105_NUM_TC; i++)
		for (j = 0; j < SJA1105_NUM_PORTS; j++)
			l2fwd[SJA1105_NUM_PORTS + i].vlan_pmap[j] = i;

	return 0;
}
353
354static int sja1105_init_l2_forwarding_params(struct sja1105_private *priv)
355{
356 struct sja1105_l2_forwarding_params_entry default_l2fwd_params = {
357 /* Disallow dynamic reconfiguration of vlan_pmap */
358 .max_dynp = 0,
359 /* Use a single memory partition for all ingress queues */
360 .part_spc = { SJA1105_MAX_FRAME_MEMORY, 0, 0, 0, 0, 0, 0, 0 },
361 };
362 struct sja1105_table *table;
363
364 table = &priv->static_config.tables[BLK_IDX_L2_FORWARDING_PARAMS];
365
366 if (table->entry_count) {
367 kfree(table->entries);
368 table->entry_count = 0;
369 }
370
371 table->entries = kcalloc(SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT,
372 table->ops->unpacked_entry_size, GFP_KERNEL);
373 if (!table->entries)
374 return -ENOMEM;
375
376 table->entry_count = SJA1105_MAX_L2_FORWARDING_PARAMS_COUNT;
377
378 /* This table only has a single entry */
379 ((struct sja1105_l2_forwarding_params_entry *)table->entries)[0] =
380 default_l2fwd_params;
381
382 return 0;
383}
384
/* Build the single-entry General Parameters Table: link-local trapping
 * filters, host/mirror/cascade port selection, and EtherType settings.
 * Also assigns a management slot to each user port as a side effect.
 * Returns 0 on success or -ENOMEM.
 */
static int sja1105_init_general_params(struct sja1105_private *priv)
{
	struct sja1105_general_params_entry default_general_params = {
		/* Disallow dynamic changing of the mirror port */
		.mirr_ptacu = 0,
		.switchid = priv->ds->index,
		/* Priority queue for link-local frames trapped to CPU */
		.hostprio = 0,
		.mac_fltres1 = SJA1105_LINKLOCAL_FILTER_A,
		.mac_flt1 = SJA1105_LINKLOCAL_FILTER_A_MASK,
		.incl_srcpt1 = true,
		.send_meta1 = false,
		.mac_fltres0 = SJA1105_LINKLOCAL_FILTER_B,
		.mac_flt0 = SJA1105_LINKLOCAL_FILTER_B_MASK,
		.incl_srcpt0 = true,
		.send_meta0 = false,
		/* The destination for traffic matching mac_fltres1 and
		 * mac_fltres0 on all ports except host_port. Such traffic
		 * received on host_port itself would be dropped, except
		 * by installing a temporary 'management route'
		 */
		.host_port = dsa_upstream_port(priv->ds, 0),
		/* Same as host port */
		.mirr_port = dsa_upstream_port(priv->ds, 0),
		/* Link-local traffic received on casc_port will be forwarded
		 * to host_port without embedding the source port and device ID
		 * info in the destination MAC address (presumably because it
		 * is a cascaded port and a downstream SJA switch already did
		 * that). Default to an invalid port (to disable the feature)
		 * and overwrite this if we find any DSA (cascaded) ports.
		 */
		.casc_port = SJA1105_NUM_PORTS,
		/* No TTEthernet */
		.vllupformat = 0,
		.vlmarker = 0,
		.vlmask = 0,
		/* Only update correctionField for 1-step PTP (L2 transport) */
		.ignore2stf = 0,
		/* Forcefully disable VLAN filtering by telling
		 * the switch that VLAN has a different EtherType.
		 */
		.tpid = ETH_P_SJA1105,
		.tpid2 = ETH_P_SJA1105,
	};
	struct sja1105_table *table;
	int i, k = 0;

	/* Find cascaded (DSA) ports, and assign consecutive management
	 * slots to the user ports.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		if (dsa_is_dsa_port(priv->ds, i))
			default_general_params.casc_port = i;
		else if (dsa_is_user_port(priv->ds, i))
			priv->ports[i].mgmt_slot = k++;
	}

	table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];

	/* Discard any previous General Parameters Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_GENERAL_PARAMS_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_GENERAL_PARAMS_COUNT;

	/* This table only has a single entry */
	((struct sja1105_general_params_entry *)table->entries)[0] =
				default_general_params;

	return 0;
}
459
460#define SJA1105_RATE_MBPS(speed) (((speed) * 64000) / 1000)
461
462static inline void
463sja1105_setup_policer(struct sja1105_l2_policing_entry *policing,
464 int index)
465{
466 policing[index].sharindx = index;
467 policing[index].smax = 65535; /* Burst size in bytes */
468 policing[index].rate = SJA1105_RATE_MBPS(1000);
469 policing[index].maxlen = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
470 policing[index].partition = 0;
471}
472
/* Build the L2 Policing Table: one policer per (port, traffic class)
 * pair plus one broadcast policer per port, all configured identically
 * by sja1105_setup_policer. Returns 0 on success or -ENOMEM.
 */
static int sja1105_init_l2_policing(struct sja1105_private *priv)
{
	struct sja1105_l2_policing_entry *policing;
	struct sja1105_table *table;
	int i, j, k;

	table = &priv->static_config.tables[BLK_IDX_L2_POLICING];

	/* Discard previous L2 Policing Table */
	if (table->entry_count) {
		kfree(table->entries);
		table->entry_count = 0;
	}

	table->entries = kcalloc(SJA1105_MAX_L2_POLICING_COUNT,
				 table->ops->unpacked_entry_size, GFP_KERNEL);
	if (!table->entries)
		return -ENOMEM;

	table->entry_count = SJA1105_MAX_L2_POLICING_COUNT;

	policing = table->entries;

	/* k sweeps through all unicast policers (0-39).
	 * bcast sweeps through policers 40-44.
	 */
	for (i = 0, k = 0; i < SJA1105_NUM_PORTS; i++) {
		int bcast = (SJA1105_NUM_PORTS * SJA1105_NUM_TC) + i;

		/* One policer per traffic class on this port */
		for (j = 0; j < SJA1105_NUM_TC; j++, k++)
			sja1105_setup_policer(policing, k);

		/* Set up this port's policer for broadcast traffic */
		sja1105_setup_policer(policing, bcast);
	}
	return 0;
}
510
/* (Re)build the whole in-memory static configuration - all tables
 * above - and push it to the hardware over SPI.
 * Returns 0 on success or a negative error code from any of the table
 * builders or from the SPI upload.
 */
static int sja1105_static_config_load(struct sja1105_private *priv,
				      struct sja1105_dt_port *ports)
{
	int rc;

	/* Start from a clean slate: discard any previous static config */
	sja1105_static_config_free(&priv->static_config);
	rc = sja1105_static_config_init(&priv->static_config,
					priv->info->static_ops,
					priv->info->device_id);
	if (rc)
		return rc;

	/* Build static configuration */
	rc = sja1105_init_mac_settings(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_mii_settings(priv, ports);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_fdb(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_static_vlan(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_lookup_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_forwarding_params(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_l2_policing(priv);
	if (rc < 0)
		return rc;
	rc = sja1105_init_general_params(priv);
	if (rc < 0)
		return rc;

	/* Send initial configuration to hardware via SPI */
	return sja1105_static_config_upload(priv);
}
555
Vladimir Olteanf5b86312019-05-02 23:23:32 +0300556static int sja1105_parse_rgmii_delays(struct sja1105_private *priv,
557 const struct sja1105_dt_port *ports)
558{
559 int i;
560
561 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
562 if (ports->role == XMII_MAC)
563 continue;
564
565 if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_RXID ||
566 ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
567 priv->rgmii_rx_delay[i] = true;
568
569 if (ports->phy_mode == PHY_INTERFACE_MODE_RGMII_TXID ||
570 ports->phy_mode == PHY_INTERFACE_MODE_RGMII_ID)
571 priv->rgmii_tx_delay[i] = true;
572
573 if ((priv->rgmii_rx_delay[i] || priv->rgmii_tx_delay[i]) &&
574 !priv->info->setup_rgmii_delay)
575 return -EINVAL;
576 }
577 return 0;
578}
579
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300580static int sja1105_parse_ports_node(struct sja1105_private *priv,
581 struct sja1105_dt_port *ports,
582 struct device_node *ports_node)
583{
584 struct device *dev = &priv->spidev->dev;
585 struct device_node *child;
586
587 for_each_child_of_node(ports_node, child) {
588 struct device_node *phy_node;
589 int phy_mode;
590 u32 index;
591
592 /* Get switch port number from DT */
593 if (of_property_read_u32(child, "reg", &index) < 0) {
594 dev_err(dev, "Port number not defined in device tree "
595 "(property \"reg\")\n");
596 return -ENODEV;
597 }
598
599 /* Get PHY mode from DT */
600 phy_mode = of_get_phy_mode(child);
601 if (phy_mode < 0) {
602 dev_err(dev, "Failed to read phy-mode or "
603 "phy-interface-type property for port %d\n",
604 index);
605 return -ENODEV;
606 }
607 ports[index].phy_mode = phy_mode;
608
609 phy_node = of_parse_phandle(child, "phy-handle", 0);
610 if (!phy_node) {
611 if (!of_phy_is_fixed_link(child)) {
612 dev_err(dev, "phy-handle or fixed-link "
613 "properties missing!\n");
614 return -ENODEV;
615 }
616 /* phy-handle is missing, but fixed-link isn't.
617 * So it's a fixed link. Default to PHY role.
618 */
619 ports[index].role = XMII_PHY;
620 } else {
621 /* phy-handle present => put port in MAC role */
622 ports[index].role = XMII_MAC;
623 of_node_put(phy_node);
624 }
625
626 /* The MAC/PHY role can be overridden with explicit bindings */
627 if (of_property_read_bool(child, "sja1105,role-mac"))
628 ports[index].role = XMII_MAC;
629 else if (of_property_read_bool(child, "sja1105,role-phy"))
630 ports[index].role = XMII_PHY;
631 }
632
633 return 0;
634}
635
636static int sja1105_parse_dt(struct sja1105_private *priv,
637 struct sja1105_dt_port *ports)
638{
639 struct device *dev = &priv->spidev->dev;
640 struct device_node *switch_node = dev->of_node;
641 struct device_node *ports_node;
642 int rc;
643
644 ports_node = of_get_child_by_name(switch_node, "ports");
645 if (!ports_node) {
646 dev_err(dev, "Incorrect bindings: absent \"ports\" node\n");
647 return -ENODEV;
648 }
649
650 rc = sja1105_parse_ports_node(priv, ports, ports_node);
651 of_node_put(ports_node);
652
653 return rc;
654}
655
/* Convert back and forth MAC speed from Mbps to SJA1105 encoding.
 * Indexed by sja1105_speed_t; looked up in reverse by
 * sja1105_get_speed_cfg.
 */
static int sja1105_speed[] = {
	[SJA1105_SPEED_AUTO] = 0,
	[SJA1105_SPEED_10MBPS] = 10,
	[SJA1105_SPEED_100MBPS] = 100,
	[SJA1105_SPEED_1000MBPS] = 1000,
};
663
664static sja1105_speed_t sja1105_get_speed_cfg(unsigned int speed_mbps)
665{
666 int i;
667
668 for (i = SJA1105_SPEED_AUTO; i <= SJA1105_SPEED_1000MBPS; i++)
669 if (sja1105_speed[i] == speed_mbps)
670 return i;
671 return -EINVAL;
672}
673
/* Set link speed and enable/disable traffic I/O in the MAC configuration
 * for a specific port.
 *
 * @speed_mbps: If 0, leave the speed unchanged, else adapt MAC to PHY speed.
 * @enabled: Manage Rx and Tx settings for this port. If false, overrides the
 *	     settings from the STP state, but not persistently (does not
 *	     overwrite the static MAC info for this port).
 *
 * Returns 0 on success, -EINVAL for an unrepresentable speed, or the
 * error from the dynamic reconfiguration / clocking setup.
 */
static int sja1105_adjust_port_config(struct sja1105_private *priv, int port,
				      int speed_mbps, bool enabled)
{
	struct sja1105_mac_config_entry dyn_mac;
	struct sja1105_xmii_params_entry *mii;
	struct sja1105_mac_config_entry *mac;
	struct device *dev = priv->ds->dev;
	sja1105_phy_interface_t phy_mode;
	sja1105_speed_t speed;
	int rc;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;
	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Speed 0 maps to SJA1105_SPEED_AUTO, so only a nonzero
	 * speed with no encoding is an error.
	 */
	speed = sja1105_get_speed_cfg(speed_mbps);
	if (speed_mbps && speed < 0) {
		dev_err(dev, "Invalid speed %iMbps\n", speed_mbps);
		return -EINVAL;
	}

	/* If requested, overwrite SJA1105_SPEED_AUTO from the static MAC
	 * configuration table, since this will be used for the clocking setup,
	 * and we no longer need to store it in the static config (already told
	 * hardware we want auto during upload phase).
	 */
	if (speed_mbps)
		mac[port].speed = speed;
	else
		mac[port].speed = SJA1105_SPEED_AUTO;

	/* On P/Q/R/S, one can read from the device via the MAC reconfiguration
	 * tables. On E/T, MAC reconfig tables are not readable, only writable.
	 * We have to *know* what the MAC looks like. For the sake of keeping
	 * the code common, we'll use the static configuration tables as a
	 * reasonable approximation for both E/T and P/Q/R/S.
	 */
	dyn_mac = mac[port];
	dyn_mac.ingress = enabled && mac[port].ingress;
	dyn_mac.egress = enabled && mac[port].egress;

	/* Write to the dynamic reconfiguration tables */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG,
					  port, &dyn_mac, true);
	if (rc < 0) {
		dev_err(dev, "Failed to write MAC config: %d\n", rc);
		return rc;
	}

	/* Reconfigure the PLLs for the RGMII interfaces (required 125 MHz at
	 * gigabit, 25 MHz at 100 Mbps and 2.5 MHz at 10 Mbps). For MII and
	 * RMII no change of the clock setup is required. Actually, changing
	 * the clock setup does interrupt the clock signal for a certain time
	 * which causes trouble for all PHYs relying on this signal.
	 */
	if (!enabled)
		return 0;

	phy_mode = mii->xmii_mode[port];
	if (phy_mode != XMII_MODE_RGMII)
		return 0;

	return sja1105_clocking_setup_port(priv, port);
}
745
Vladimir Olteanaf7cd032019-05-28 20:38:17 +0300746static void sja1105_mac_config(struct dsa_switch *ds, int port,
747 unsigned int link_an_mode,
748 const struct phylink_link_state *state)
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300749{
750 struct sja1105_private *priv = ds->priv;
751
Vladimir Olteanaf7cd032019-05-28 20:38:17 +0300752 if (!state->link)
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300753 sja1105_adjust_port_config(priv, port, 0, false);
754 else
Vladimir Olteanaf7cd032019-05-28 20:38:17 +0300755 sja1105_adjust_port_config(priv, port, state->speed, true);
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +0300756}
757
/* phylink validate hook: restrict the advertised/supported link modes
 * to what the MAC can do - full duplex only, no pause frames, and
 * gigabit only on RGMII ports.
 */
static void sja1105_phylink_validate(struct dsa_switch *ds, int port,
				     unsigned long *supported,
				     struct phylink_link_state *state)
{
	/* Construct a new mask which exhaustively contains all link features
	 * supported by the MAC, and then apply that (logical AND) to what will
	 * be sent to the PHY for "marketing".
	 */
	__ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, };
	struct sja1105_private *priv = ds->priv;
	struct sja1105_xmii_params_entry *mii;

	mii = priv->static_config.tables[BLK_IDX_XMII_PARAMS].entries;

	/* The MAC does not support pause frames, and also doesn't
	 * support half-duplex traffic modes.
	 */
	phylink_set(mask, Autoneg);
	phylink_set(mask, MII);
	phylink_set(mask, 10baseT_Full);
	phylink_set(mask, 100baseT_Full);
	/* 1000baseT only when the port is configured for RGMII */
	if (mii->xmii_mode[port] == XMII_MODE_RGMII)
		phylink_set(mask, 1000baseT_Full);

	bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS);
	bitmap_and(state->advertising, state->advertising, mask,
		   __ETHTOOL_LINK_MODE_MASK_NBITS);
}
786
/* First-generation switches have a 4-way set associative TCAM that
 * holds the FDB entries. An FDB index spans from 0 to 1023 and is comprised of
 * a "bin" (grouping of 4 entries) and a "way" (an entry within a bin).
 * For the placement of a newly learnt FDB entry, the switch selects the bin
 * based on a hash function, and the way within that bin incrementally.
 */
static inline int sja1105et_fdb_index(int bin, int way)
{
	/* Flatten the (bin, way) pair into a linear TCAM index */
	return bin * SJA1105ET_FDB_BIN_SIZE + way;
}
797
/* Scan the 4 ways of @bin looking for an FDB entry matching (addr, vid).
 *
 * @match: if non-NULL, filled in with the matching entry when found.
 * @last_unused: if non-NULL, set to the last unoccupied way seen while
 *		 scanning (left untouched if the bin is full) - used by
 *		 the caller to place a new entry.
 *
 * Returns the way within the bin on a match, or -1 if not found.
 */
static int sja1105et_is_fdb_entry_in_bin(struct sja1105_private *priv, int bin,
					 const u8 *addr, u16 vid,
					 struct sja1105_l2_lookup_entry *match,
					 int *last_unused)
{
	int way;

	for (way = 0; way < SJA1105ET_FDB_BIN_SIZE; way++) {
		struct sja1105_l2_lookup_entry l2_lookup = {0};
		int index = sja1105et_fdb_index(bin, way);

		/* Skip unused entries, optionally marking them
		 * into the return value
		 */
		if (sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						index, &l2_lookup)) {
			if (last_unused)
				*last_unused = way;
			continue;
		}

		if (l2_lookup.macaddr == ether_addr_to_u64(addr) &&
		    l2_lookup.vlanid == vid) {
			if (match)
				*match = l2_lookup;
			return way;
		}
	}
	/* Return an invalid entry index if not found */
	return -1;
}
829
Vladimir Oltean9dfa6912019-06-03 00:11:57 +0300830int sja1105et_fdb_add(struct dsa_switch *ds, int port,
831 const unsigned char *addr, u16 vid)
Vladimir Oltean291d1e72019-05-02 23:23:31 +0300832{
833 struct sja1105_l2_lookup_entry l2_lookup = {0};
834 struct sja1105_private *priv = ds->priv;
835 struct device *dev = ds->dev;
836 int last_unused = -1;
837 int bin, way;
838
Vladimir Oltean9dfa6912019-06-03 00:11:57 +0300839 bin = sja1105et_fdb_hash(priv, addr, vid);
Vladimir Oltean291d1e72019-05-02 23:23:31 +0300840
Vladimir Oltean9dfa6912019-06-03 00:11:57 +0300841 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
842 &l2_lookup, &last_unused);
Vladimir Oltean291d1e72019-05-02 23:23:31 +0300843 if (way >= 0) {
844 /* We have an FDB entry. Is our port in the destination
845 * mask? If yes, we need to do nothing. If not, we need
846 * to rewrite the entry by adding this port to it.
847 */
848 if (l2_lookup.destports & BIT(port))
849 return 0;
850 l2_lookup.destports |= BIT(port);
851 } else {
852 int index = sja1105et_fdb_index(bin, way);
853
854 /* We don't have an FDB entry. We construct a new one and
855 * try to find a place for it within the FDB table.
856 */
857 l2_lookup.macaddr = ether_addr_to_u64(addr);
858 l2_lookup.destports = BIT(port);
859 l2_lookup.vlanid = vid;
860
861 if (last_unused >= 0) {
862 way = last_unused;
863 } else {
864 /* Bin is full, need to evict somebody.
865 * Choose victim at random. If you get these messages
866 * often, you may need to consider changing the
867 * distribution function:
868 * static_config[BLK_IDX_L2_LOOKUP_PARAMS].entries->poly
869 */
870 get_random_bytes(&way, sizeof(u8));
871 way %= SJA1105ET_FDB_BIN_SIZE;
872 dev_warn(dev, "Warning, FDB bin %d full while adding entry for %pM. Evicting entry %u.\n",
873 bin, addr, way);
874 /* Evict entry */
875 sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
876 index, NULL, false);
877 }
878 }
879 l2_lookup.index = sja1105et_fdb_index(bin, way);
880
881 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
882 l2_lookup.index, &l2_lookup,
883 true);
884}
885
Vladimir Oltean9dfa6912019-06-03 00:11:57 +0300886int sja1105et_fdb_del(struct dsa_switch *ds, int port,
887 const unsigned char *addr, u16 vid)
Vladimir Oltean291d1e72019-05-02 23:23:31 +0300888{
889 struct sja1105_l2_lookup_entry l2_lookup = {0};
890 struct sja1105_private *priv = ds->priv;
891 int index, bin, way;
892 bool keep;
893
Vladimir Oltean9dfa6912019-06-03 00:11:57 +0300894 bin = sja1105et_fdb_hash(priv, addr, vid);
895 way = sja1105et_is_fdb_entry_in_bin(priv, bin, addr, vid,
896 &l2_lookup, NULL);
Vladimir Oltean291d1e72019-05-02 23:23:31 +0300897 if (way < 0)
898 return 0;
899 index = sja1105et_fdb_index(bin, way);
900
901 /* We have an FDB entry. Is our port in the destination mask? If yes,
902 * we need to remove it. If the resulting port mask becomes empty, we
903 * need to completely evict the FDB entry.
904 * Otherwise we just write it back.
905 */
Vladimir Oltean7752e932019-06-03 00:15:54 +0300906 l2_lookup.destports &= ~BIT(port);
907
Vladimir Oltean291d1e72019-05-02 23:23:31 +0300908 if (l2_lookup.destports)
909 keep = true;
910 else
911 keep = false;
912
913 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
914 index, &l2_lookup, keep);
915}
916
/* Add (or extend with @port) an FDB entry for (addr, vid) on P/Q/R/S
 * switches, which can search the FDB by key in hardware.
 */
int sja1105pqrs_fdb_add(struct dsa_switch *ds, int port,
			const unsigned char *addr, u16 vid)
{
	struct sja1105_l2_lookup_entry l2_lookup = {0};
	struct sja1105_private *priv = ds->priv;
	int rc, i;

	/* Search for an existing entry in the FDB table */
	l2_lookup.macaddr = ether_addr_to_u64(addr);
	l2_lookup.vlanid = vid;
	l2_lookup.iotag = SJA1105_S_TAG;
	l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
	l2_lookup.mask_vlanid = VLAN_VID_MASK;
	l2_lookup.mask_iotag = BIT(0);
	l2_lookup.destports = BIT(port);

	rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
					 SJA1105_SEARCH, &l2_lookup);
	if (rc == 0) {
		/* Found and this port is already in the entry's
		 * port mask => job done
		 */
		if (l2_lookup.destports & BIT(port))
			return 0;
		/* l2_lookup.index is populated by the switch in case it
		 * found something.
		 */
		l2_lookup.destports |= BIT(port);
		goto skip_finding_an_index;
	}

	/* Not found, so try to find an unused spot in the FDB.
	 * This is slightly inefficient because the strategy is knock-knock at
	 * every possible position from 0 to 1023.
	 */
	for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
						 i, NULL);
		/* A failed read means this index is unused - claim it */
		if (rc < 0)
			break;
	}
	if (i == SJA1105_MAX_L2_LOOKUP_COUNT) {
		dev_err(ds->dev, "FDB is full, cannot add entry.\n");
		return -EINVAL;
	}
	l2_lookup.index = i;

skip_finding_an_index:
	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
					    l2_lookup.index, &l2_lookup,
					    true);
}
969
970int sja1105pqrs_fdb_del(struct dsa_switch *ds, int port,
971 const unsigned char *addr, u16 vid)
972{
Vladimir Oltean1da73822019-06-03 00:15:45 +0300973 struct sja1105_l2_lookup_entry l2_lookup = {0};
974 struct sja1105_private *priv = ds->priv;
975 bool keep;
976 int rc;
977
978 l2_lookup.macaddr = ether_addr_to_u64(addr);
979 l2_lookup.vlanid = vid;
980 l2_lookup.iotag = SJA1105_S_TAG;
981 l2_lookup.mask_macaddr = GENMASK_ULL(ETH_ALEN * 8 - 1, 0);
982 l2_lookup.mask_vlanid = VLAN_VID_MASK;
983 l2_lookup.mask_iotag = BIT(0);
984 l2_lookup.destports = BIT(port);
985
986 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
987 SJA1105_SEARCH, &l2_lookup);
988 if (rc < 0)
989 return 0;
990
991 l2_lookup.destports &= ~BIT(port);
992
993 /* Decide whether we remove just this port from the FDB entry,
994 * or if we remove it completely.
995 */
996 if (l2_lookup.destports)
997 keep = true;
998 else
999 keep = false;
1000
1001 return sja1105_dynamic_config_write(priv, BLK_IDX_L2_LOOKUP,
1002 l2_lookup.index, &l2_lookup, keep);
Vladimir Oltean9dfa6912019-06-03 00:11:57 +03001003}
1004
1005static int sja1105_fdb_add(struct dsa_switch *ds, int port,
1006 const unsigned char *addr, u16 vid)
1007{
1008 struct sja1105_private *priv = ds->priv;
1009
1010 return priv->info->fdb_add_cmd(ds, port, addr, vid);
1011}
1012
1013static int sja1105_fdb_del(struct dsa_switch *ds, int port,
1014 const unsigned char *addr, u16 vid)
1015{
1016 struct sja1105_private *priv = ds->priv;
1017
1018 return priv->info->fdb_del_cmd(ds, port, addr, vid);
1019}
1020
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001021static int sja1105_fdb_dump(struct dsa_switch *ds, int port,
1022 dsa_fdb_dump_cb_t *cb, void *data)
1023{
1024 struct sja1105_private *priv = ds->priv;
1025 struct device *dev = ds->dev;
1026 int i;
1027
1028 for (i = 0; i < SJA1105_MAX_L2_LOOKUP_COUNT; i++) {
1029 struct sja1105_l2_lookup_entry l2_lookup = {0};
1030 u8 macaddr[ETH_ALEN];
1031 int rc;
1032
1033 rc = sja1105_dynamic_config_read(priv, BLK_IDX_L2_LOOKUP,
1034 i, &l2_lookup);
1035 /* No fdb entry at i, not an issue */
Vladimir Olteandef84602019-06-03 00:11:59 +03001036 if (rc == -ENOENT)
Vladimir Oltean291d1e72019-05-02 23:23:31 +03001037 continue;
1038 if (rc) {
1039 dev_err(dev, "Failed to dump FDB: %d\n", rc);
1040 return rc;
1041 }
1042
1043 /* FDB dump callback is per port. This means we have to
1044 * disregard a valid entry if it's not for this port, even if
1045 * only to revisit it later. This is inefficient because the
1046 * 1024-sized FDB table needs to be traversed 4 times through
1047 * SPI during a 'bridge fdb show' command.
1048 */
1049 if (!(l2_lookup.destports & BIT(port)))
1050 continue;
1051 u64_to_ether_addr(l2_lookup.macaddr, macaddr);
1052 cb(macaddr, l2_lookup.vlanid, false, data);
1053 }
1054 return 0;
1055}
1056
/* This callback needs to be present for the DSA core; no preparation work
 * is required, everything happens in sja1105_mdb_add.
 */
static int sja1105_mdb_prepare(struct dsa_switch *ds, int port,
			       const struct switchdev_obj_port_mdb *mdb)
{
	return 0;
}
1063
1064static void sja1105_mdb_add(struct dsa_switch *ds, int port,
1065 const struct switchdev_obj_port_mdb *mdb)
1066{
1067 sja1105_fdb_add(ds, port, mdb->addr, mdb->vid);
1068}
1069
1070static int sja1105_mdb_del(struct dsa_switch *ds, int port,
1071 const struct switchdev_obj_port_mdb *mdb)
1072{
1073 return sja1105_fdb_del(ds, port, mdb->addr, mdb->vid);
1074}
1075
/* Add (@member = true) or remove (@member = false) @port to/from the L2
 * forwarding domain of every other user port that belongs to bridge @br,
 * updating both directions and committing each entry over SPI.
 */
static int sja1105_bridge_member(struct dsa_switch *ds, int port,
				 struct net_device *br, bool member)
{
	struct sja1105_l2_forwarding_entry *l2_fwd;
	struct sja1105_private *priv = ds->priv;
	int i, rc;

	l2_fwd = priv->static_config.tables[BLK_IDX_L2_FORWARDING].entries;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		/* Add this port to the forwarding matrix of the
		 * other ports in the same bridge, and viceversa.
		 */
		if (!dsa_is_user_port(ds, i))
			continue;
		/* For the ports already under the bridge, only one thing needs
		 * to be done, and that is to add this port to their
		 * reachability domain. So we can perform the SPI write for
		 * them immediately. However, for this port itself (the one
		 * that is new to the bridge), we need to add all other ports
		 * to its reachability domain. So we do that incrementally in
		 * this loop, and perform the SPI write only at the end, once
		 * the domain contains all other bridge ports.
		 */
		if (i == port)
			continue;
		if (dsa_to_port(ds, i)->bridge_dev != br)
			continue;
		sja1105_port_allow_traffic(l2_fwd, i, port, member);
		sja1105_port_allow_traffic(l2_fwd, port, i, member);

		rc = sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
						  i, &l2_fwd[i], true);
		if (rc < 0)
			return rc;
	}

	/* Finally commit this port's own, fully accumulated, entry */
	return sja1105_dynamic_config_write(priv, BLK_IDX_L2_FORWARDING,
					    port, &l2_fwd[port], true);
}
1116
Vladimir Oltean640f7632019-05-05 13:19:28 +03001117static void sja1105_bridge_stp_state_set(struct dsa_switch *ds, int port,
1118 u8 state)
1119{
1120 struct sja1105_private *priv = ds->priv;
1121 struct sja1105_mac_config_entry *mac;
1122
1123 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1124
1125 switch (state) {
1126 case BR_STATE_DISABLED:
1127 case BR_STATE_BLOCKING:
1128 /* From UM10944 description of DRPDTAG (why put this there?):
1129 * "Management traffic flows to the port regardless of the state
1130 * of the INGRESS flag". So BPDUs are still be allowed to pass.
1131 * At the moment no difference between DISABLED and BLOCKING.
1132 */
1133 mac[port].ingress = false;
1134 mac[port].egress = false;
1135 mac[port].dyn_learn = false;
1136 break;
1137 case BR_STATE_LISTENING:
1138 mac[port].ingress = true;
1139 mac[port].egress = false;
1140 mac[port].dyn_learn = false;
1141 break;
1142 case BR_STATE_LEARNING:
1143 mac[port].ingress = true;
1144 mac[port].egress = false;
1145 mac[port].dyn_learn = true;
1146 break;
1147 case BR_STATE_FORWARDING:
1148 mac[port].ingress = true;
1149 mac[port].egress = true;
1150 mac[port].dyn_learn = true;
1151 break;
1152 default:
1153 dev_err(ds->dev, "invalid STP state: %d\n", state);
1154 return;
1155 }
1156
1157 sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1158 &mac[port], true);
1159}
1160
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001161static int sja1105_bridge_join(struct dsa_switch *ds, int port,
1162 struct net_device *br)
1163{
1164 return sja1105_bridge_member(ds, port, br, true);
1165}
1166
1167static void sja1105_bridge_leave(struct dsa_switch *ds, int port,
1168 struct net_device *br)
1169{
1170 sja1105_bridge_member(ds, port, br, false);
1171}
1172
Vladimir Oltean640f7632019-05-05 13:19:28 +03001173static u8 sja1105_stp_state_get(struct sja1105_private *priv, int port)
1174{
1175 struct sja1105_mac_config_entry *mac;
1176
1177 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1178
1179 if (!mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
1180 return BR_STATE_BLOCKING;
1181 if (mac[port].ingress && !mac[port].egress && !mac[port].dyn_learn)
1182 return BR_STATE_LISTENING;
1183 if (mac[port].ingress && !mac[port].egress && mac[port].dyn_learn)
1184 return BR_STATE_LEARNING;
1185 if (mac[port].ingress && mac[port].egress && mac[port].dyn_learn)
1186 return BR_STATE_FORWARDING;
Vladimir Oltean3b2c4f42019-05-08 23:32:25 +03001187 /* This is really an error condition if the MAC was in none of the STP
1188 * states above. But treating the port as disabled does nothing, which
1189 * is adequate, and it also resets the MAC to a known state later on.
1190 */
1191 return BR_STATE_DISABLED;
Vladimir Oltean640f7632019-05-05 13:19:28 +03001192}
1193
/* For situations where we need to change a setting at runtime that is only
 * available through the static configuration, resetting the switch in order
 * to upload the new static config is unavoidable. Back up the settings we
 * modify at runtime (currently only MAC) and restore them after uploading,
 * such that this operation is relatively seamless.
 */
static int sja1105_static_config_reload(struct sja1105_private *priv)
{
	struct sja1105_mac_config_entry *mac;
	int speed_mbps[SJA1105_NUM_PORTS];
	u8 stp_state[SJA1105_NUM_PORTS];
	int rc, i;

	mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;

	/* Back up settings changed by sja1105_adjust_port_config and
	 * sja1105_bridge_stp_state_set and restore their defaults.
	 */
	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		speed_mbps[i] = sja1105_speed[mac[i].speed];
		mac[i].speed = SJA1105_SPEED_AUTO;
		/* The upstream (CPU-facing) port always stays forwarding;
		 * only non-upstream ports have an STP state worth saving.
		 * Note that stp_state[i] is only written (and later read)
		 * for the non-upstream branch.
		 */
		if (i == dsa_upstream_port(priv->ds, i)) {
			mac[i].ingress = true;
			mac[i].egress = true;
			mac[i].dyn_learn = true;
		} else {
			stp_state[i] = sja1105_stp_state_get(priv, i);
			mac[i].ingress = false;
			mac[i].egress = false;
			mac[i].dyn_learn = false;
		}
	}

	/* Reset switch and send updated static configuration */
	rc = sja1105_static_config_upload(priv);
	if (rc < 0)
		goto out;

	/* Configure the CGU (PLLs) for MII and RMII PHYs.
	 * For these interfaces there is no dynamic configuration
	 * needed, since PLLs have same settings at all speeds.
	 */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0)
		goto out;

	for (i = 0; i < SJA1105_NUM_PORTS; i++) {
		bool enabled = (speed_mbps[i] != 0);

		/* Restore the per-port STP state saved before the reset */
		if (i != dsa_upstream_port(priv->ds, i))
			sja1105_bridge_stp_state_set(priv->ds, i, stp_state[i]);

		rc = sja1105_adjust_port_config(priv, i, speed_mbps[i],
						enabled);
		if (rc < 0)
			goto out;
	}
out:
	return rc;
}
1254
1255/* The TPID setting belongs to the General Parameters table,
1256 * which can only be partially reconfigured at runtime (and not the TPID).
1257 * So a switch reset is required.
1258 */
1259static int sja1105_change_tpid(struct sja1105_private *priv,
1260 u16 tpid, u16 tpid2)
1261{
1262 struct sja1105_general_params_entry *general_params;
1263 struct sja1105_table *table;
1264
1265 table = &priv->static_config.tables[BLK_IDX_GENERAL_PARAMS];
1266 general_params = table->entries;
1267 general_params->tpid = tpid;
1268 general_params->tpid2 = tpid2;
1269 return sja1105_static_config_reload(priv);
1270}
1271
1272static int sja1105_pvid_apply(struct sja1105_private *priv, int port, u16 pvid)
1273{
1274 struct sja1105_mac_config_entry *mac;
1275
1276 mac = priv->static_config.tables[BLK_IDX_MAC_CONFIG].entries;
1277
1278 mac[port].vlanid = pvid;
1279
1280 return sja1105_dynamic_config_write(priv, BLK_IDX_MAC_CONFIG, port,
1281 &mac[port], true);
1282}
1283
1284static int sja1105_is_vlan_configured(struct sja1105_private *priv, u16 vid)
1285{
1286 struct sja1105_vlan_lookup_entry *vlan;
1287 int count, i;
1288
1289 vlan = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entries;
1290 count = priv->static_config.tables[BLK_IDX_VLAN_LOOKUP].entry_count;
1291
1292 for (i = 0; i < count; i++)
1293 if (vlan[i].vlanid == vid)
1294 return i;
1295
1296 /* Return an invalid entry index if not found */
1297 return -1;
1298}
1299
/* Add (@enabled = true) or remove (@enabled = false) @port's membership in
 * VLAN @vid, keeping the shadow static config and the hardware VLAN lookup
 * table (via dynamic config) in sync. @untagged controls whether frames
 * egress @port without a VLAN tag. When the last member port is removed,
 * the whole entry is invalidated in hardware and deleted from the shadow
 * table.
 */
static int sja1105_vlan_apply(struct sja1105_private *priv, int port, u16 vid,
			      bool enabled, bool untagged)
{
	struct sja1105_vlan_lookup_entry *vlan;
	struct sja1105_table *table;
	bool keep = true;
	int match, rc;

	table = &priv->static_config.tables[BLK_IDX_VLAN_LOOKUP];

	match = sja1105_is_vlan_configured(priv, vid);
	if (match < 0) {
		/* Can't delete a missing entry. */
		if (!enabled)
			return 0;
		/* Grow the shadow table by one to hold the new VLAN */
		rc = sja1105_table_resize(table, table->entry_count + 1);
		if (rc)
			return rc;
		match = table->entry_count - 1;
	}
	/* Assign pointer after the resize (it's new memory) */
	vlan = table->entries;
	vlan[match].vlanid = vid;
	if (enabled) {
		vlan[match].vlan_bc |= BIT(port);
		vlan[match].vmemb_port |= BIT(port);
	} else {
		vlan[match].vlan_bc &= ~BIT(port);
		vlan[match].vmemb_port &= ~BIT(port);
	}
	/* Also unset tag_port if removing this VLAN was requested,
	 * just so we don't have a confusing bitmap (no practical purpose).
	 */
	if (untagged || !enabled)
		vlan[match].tag_port &= ~BIT(port);
	else
		vlan[match].tag_port |= BIT(port);
	/* If there's no port left as member of this VLAN,
	 * it's time for it to go.
	 */
	if (!vlan[match].vmemb_port)
		keep = false;

	dev_dbg(priv->ds->dev,
		"%s: port %d, vid %llu, broadcast domain 0x%llx, "
		"port members 0x%llx, tagged ports 0x%llx, keep %d\n",
		__func__, port, vlan[match].vlanid, vlan[match].vlan_bc,
		vlan[match].vmemb_port, vlan[match].tag_port, keep);

	/* For the VLAN lookup table, the dynamic config index is the VLAN
	 * ID itself, not the shadow table position.
	 */
	rc = sja1105_dynamic_config_write(priv, BLK_IDX_VLAN_LOOKUP, vid,
					  &vlan[match], keep);
	if (rc < 0)
		return rc;

	/* Hardware entry invalidated above; drop it from the shadow too */
	if (!keep)
		return sja1105_table_delete_entry(table, match);

	return 0;
}
1359
Vladimir Oltean227d07a2019-05-05 13:19:27 +03001360static int sja1105_setup_8021q_tagging(struct dsa_switch *ds, bool enabled)
1361{
1362 int rc, i;
1363
1364 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1365 rc = dsa_port_setup_8021q_tagging(ds, i, enabled);
1366 if (rc < 0) {
1367 dev_err(ds->dev, "Failed to setup VLAN tagging for port %d: %d\n",
1368 i, rc);
1369 return rc;
1370 }
1371 }
1372 dev_info(ds->dev, "%s switch tagging\n",
1373 enabled ? "Enabled" : "Disabled");
1374 return 0;
1375}
1376
/* Advertise the SJA1105-specific tagging protocol to the DSA core */
static enum dsa_tag_protocol
sja1105_get_tag_protocol(struct dsa_switch *ds, int port)
{
	return DSA_TAG_PROTO_SJA1105;
}
1382
/* This callback needs to be present. No preparation is done here; all the
 * work happens in sja1105_vlan_add.
 */
static int sja1105_vlan_prepare(struct dsa_switch *ds, int port,
				const struct switchdev_obj_port_vlan *vlan)
{
	return 0;
}
1389
1390static int sja1105_vlan_filtering(struct dsa_switch *ds, int port, bool enabled)
1391{
1392 struct sja1105_private *priv = ds->priv;
1393 int rc;
1394
1395 if (enabled)
1396 /* Enable VLAN filtering. */
1397 rc = sja1105_change_tpid(priv, ETH_P_8021Q, ETH_P_8021AD);
1398 else
1399 /* Disable VLAN filtering. */
1400 rc = sja1105_change_tpid(priv, ETH_P_SJA1105, ETH_P_SJA1105);
1401 if (rc)
1402 dev_err(ds->dev, "Failed to change VLAN Ethertype\n");
1403
Vladimir Oltean227d07a2019-05-05 13:19:27 +03001404 /* Switch port identification based on 802.1Q is only passable
1405 * if we are not under a vlan_filtering bridge. So make sure
1406 * the two configurations are mutually exclusive.
1407 */
1408 return sja1105_setup_8021q_tagging(ds, !enabled);
Vladimir Oltean6666ceb2019-05-02 23:23:34 +03001409}
1410
1411static void sja1105_vlan_add(struct dsa_switch *ds, int port,
1412 const struct switchdev_obj_port_vlan *vlan)
1413{
1414 struct sja1105_private *priv = ds->priv;
1415 u16 vid;
1416 int rc;
1417
1418 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1419 rc = sja1105_vlan_apply(priv, port, vid, true, vlan->flags &
1420 BRIDGE_VLAN_INFO_UNTAGGED);
1421 if (rc < 0) {
1422 dev_err(ds->dev, "Failed to add VLAN %d to port %d: %d\n",
1423 vid, port, rc);
1424 return;
1425 }
1426 if (vlan->flags & BRIDGE_VLAN_INFO_PVID) {
1427 rc = sja1105_pvid_apply(ds->priv, port, vid);
1428 if (rc < 0) {
1429 dev_err(ds->dev, "Failed to set pvid %d on port %d: %d\n",
1430 vid, port, rc);
1431 return;
1432 }
1433 }
1434 }
1435}
1436
1437static int sja1105_vlan_del(struct dsa_switch *ds, int port,
1438 const struct switchdev_obj_port_vlan *vlan)
1439{
1440 struct sja1105_private *priv = ds->priv;
1441 u16 vid;
1442 int rc;
1443
1444 for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) {
1445 rc = sja1105_vlan_apply(priv, port, vid, false, vlan->flags &
1446 BRIDGE_VLAN_INFO_UNTAGGED);
1447 if (rc < 0) {
1448 dev_err(ds->dev, "Failed to remove VLAN %d from port %d: %d\n",
1449 vid, port, rc);
1450 return rc;
1451 }
1452 }
1453 return 0;
1454}
1455
/* The programming model for the SJA1105 switch is "all-at-once" via static
 * configuration tables. Some of these can be dynamically modified at runtime,
 * but not the xMII mode parameters table.
 * Furthermore, some PHYs may not have crystals for generating their clocks
 * (e.g. RMII). Instead, their 50MHz clock is supplied via the SJA1105 port's
 * ref_clk pin. So port clocking needs to be initialized early, before
 * connecting to PHYs is attempted, otherwise they won't respond through MDIO.
 * Setting correct PHY link speed does not matter now.
 * But dsa_slave_phy_setup is called later than sja1105_setup, so the PHY
 * bindings are not yet parsed by DSA core. We need to parse early so that we
 * can populate the xMII mode parameters table.
 */
static int sja1105_setup(struct dsa_switch *ds)
{
	struct sja1105_dt_port ports[SJA1105_NUM_PORTS];
	struct sja1105_private *priv = ds->priv;
	int rc;

	/* Collect per-port phy-mode and MII role from the device tree */
	rc = sja1105_parse_dt(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to parse DT: %d\n", rc);
		return rc;
	}

	/* Error out early if internal delays are required through DT
	 * and we can't apply them.
	 */
	rc = sja1105_parse_rgmii_delays(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "RGMII delay not supported\n");
		return rc;
	}

	/* Create and send configuration down to device */
	rc = sja1105_static_config_load(priv, ports);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to load static config: %d\n", rc);
		return rc;
	}
	/* Configure the CGU (PHY link modes and speeds) */
	rc = sja1105_clocking_setup(priv);
	if (rc < 0) {
		dev_err(ds->dev, "Failed to configure MII clocking: %d\n", rc);
		return rc;
	}
	/* On SJA1105, VLAN filtering per se is always enabled in hardware.
	 * The only thing we can do to disable it is lie about what the 802.1Q
	 * EtherType is.
	 * So it will still try to apply VLAN filtering, but all ingress
	 * traffic (except frames received with EtherType of ETH_P_SJA1105)
	 * will be internally tagged with a distorted VLAN header where the
	 * TPID is ETH_P_SJA1105, and the VLAN ID is the port pvid.
	 */
	ds->vlan_filtering_is_global = true;

	/* The DSA/switchdev model brings up switch ports in standalone mode by
	 * default, and that means vlan_filtering is 0 since they're not under
	 * a bridge, so it's safe to set up switch tagging at this time.
	 */
	return sja1105_setup_8021q_tagging(ds, true);
}
1517
/* Transmit one frame through a temporary management route installed in
 * @slot: the route matches the frame's DMAC and forces egress on @port.
 * After handing the skb to the CPU port, poll until the switch clears the
 * route's ENFPORT flag (its hardware acknowledgment of a match) or a
 * bounded number of attempts is exhausted.
 * Called with priv->mgmt_lock held (see sja1105_port_deferred_xmit).
 */
static int sja1105_mgmt_xmit(struct dsa_switch *ds, int port, int slot,
			     struct sk_buff *skb)
{
	struct sja1105_mgmt_entry mgmt_route = {0};
	struct sja1105_private *priv = ds->priv;
	struct ethhdr *hdr;
	int timeout = 10;
	int rc;

	hdr = eth_hdr(skb);

	/* One-shot route: match this DMAC, egress only on @port */
	mgmt_route.macaddr = ether_addr_to_u64(hdr->h_dest);
	mgmt_route.destports = BIT(port);
	mgmt_route.enfport = 1;

	rc = sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					  slot, &mgmt_route, true);
	if (rc < 0) {
		/* Route could not be installed; we own the skb, so free it */
		kfree_skb(skb);
		return rc;
	}

	/* Transfer skb to the host port. */
	dsa_enqueue_skb(skb, ds->ports[port].slave);

	/* Wait until the switch has processed the frame */
	do {
		rc = sja1105_dynamic_config_read(priv, BLK_IDX_MGMT_ROUTE,
						 slot, &mgmt_route);
		if (rc < 0) {
			/* A failed poll still consumes one of the bounded
			 * attempts (--timeout in the loop condition), so
			 * persistent SPI errors cannot loop forever.
			 */
			dev_err_ratelimited(priv->ds->dev,
					    "failed to poll for mgmt route\n");
			continue;
		}

		/* UM10944: The ENFPORT flag of the respective entry is
		 * cleared when a match is found. The host can use this
		 * flag as an acknowledgment.
		 */
		cpu_relax();
	} while (mgmt_route.enfport && --timeout);

	if (!timeout) {
		/* Clean up the management route so that a follow-up
		 * frame may not match on it by mistake.
		 * This is only hardware supported on P/Q/R/S - on E/T it is
		 * a no-op and we are silently discarding the -EOPNOTSUPP.
		 */
		sja1105_dynamic_config_write(priv, BLK_IDX_MGMT_ROUTE,
					     slot, &mgmt_route, false);
		dev_err_ratelimited(priv->ds->dev, "xmit timed out\n");
	}

	return NETDEV_TX_OK;
}
1573
1574/* Deferred work is unfortunately necessary because setting up the management
1575 * route cannot be done from atomit context (SPI transfer takes a sleepable
1576 * lock on the bus)
1577 */
1578static netdev_tx_t sja1105_port_deferred_xmit(struct dsa_switch *ds, int port,
1579 struct sk_buff *skb)
1580{
1581 struct sja1105_private *priv = ds->priv;
1582 struct sja1105_port *sp = &priv->ports[port];
1583 int slot = sp->mgmt_slot;
1584
1585 /* The tragic fact about the switch having 4x2 slots for installing
1586 * management routes is that all of them except one are actually
1587 * useless.
1588 * If 2 slots are simultaneously configured for two BPDUs sent to the
1589 * same (multicast) DMAC but on different egress ports, the switch
1590 * would confuse them and redirect first frame it receives on the CPU
1591 * port towards the port configured on the numerically first slot
1592 * (therefore wrong port), then second received frame on second slot
1593 * (also wrong port).
1594 * So for all practical purposes, there needs to be a lock that
1595 * prevents that from happening. The slot used here is utterly useless
1596 * (could have simply been 0 just as fine), but we are doing it
1597 * nonetheless, in case a smarter idea ever comes up in the future.
1598 */
1599 mutex_lock(&priv->mgmt_lock);
1600
1601 sja1105_mgmt_xmit(ds, port, slot, skb);
1602
1603 mutex_unlock(&priv->mgmt_lock);
1604 return NETDEV_TX_OK;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001605}
1606
Vladimir Oltean84567212019-05-02 23:23:36 +03001607/* The MAXAGE setting belongs to the L2 Forwarding Parameters table,
1608 * which cannot be reconfigured at runtime. So a switch reset is required.
1609 */
1610static int sja1105_set_ageing_time(struct dsa_switch *ds,
1611 unsigned int ageing_time)
1612{
1613 struct sja1105_l2_lookup_params_entry *l2_lookup_params;
1614 struct sja1105_private *priv = ds->priv;
1615 struct sja1105_table *table;
1616 unsigned int maxage;
1617
1618 table = &priv->static_config.tables[BLK_IDX_L2_LOOKUP_PARAMS];
1619 l2_lookup_params = table->entries;
1620
1621 maxage = SJA1105_AGEING_TIME_MS(ageing_time);
1622
1623 if (l2_lookup_params->maxage == maxage)
1624 return 0;
1625
1626 l2_lookup_params->maxage = maxage;
1627
1628 return sja1105_static_config_reload(priv);
1629}
1630
/* DSA operations implemented by this driver. Callbacks not listed here are
 * left to the DSA core defaults.
 */
static const struct dsa_switch_ops sja1105_switch_ops = {
	.get_tag_protocol	= sja1105_get_tag_protocol,
	.setup			= sja1105_setup,
	.set_ageing_time	= sja1105_set_ageing_time,
	.phylink_validate	= sja1105_phylink_validate,
	.phylink_mac_config	= sja1105_mac_config,
	.get_strings		= sja1105_get_strings,
	.get_ethtool_stats	= sja1105_get_ethtool_stats,
	.get_sset_count		= sja1105_get_sset_count,
	.port_fdb_dump		= sja1105_fdb_dump,
	.port_fdb_add		= sja1105_fdb_add,
	.port_fdb_del		= sja1105_fdb_del,
	.port_bridge_join	= sja1105_bridge_join,
	.port_bridge_leave	= sja1105_bridge_leave,
	.port_stp_state_set	= sja1105_bridge_stp_state_set,
	.port_vlan_prepare	= sja1105_vlan_prepare,
	.port_vlan_filtering	= sja1105_vlan_filtering,
	.port_vlan_add		= sja1105_vlan_add,
	.port_vlan_del		= sja1105_vlan_del,
	.port_mdb_prepare	= sja1105_mdb_prepare,
	.port_mdb_add		= sja1105_mdb_add,
	.port_mdb_del		= sja1105_mdb_del,
	.port_deferred_xmit	= sja1105_port_deferred_xmit,
};
1655
1656static int sja1105_check_device_id(struct sja1105_private *priv)
1657{
1658 const struct sja1105_regs *regs = priv->info->regs;
1659 u8 prod_id[SJA1105_SIZE_DEVICE_ID] = {0};
1660 struct device *dev = &priv->spidev->dev;
1661 u64 device_id;
1662 u64 part_no;
1663 int rc;
1664
1665 rc = sja1105_spi_send_int(priv, SPI_READ, regs->device_id,
1666 &device_id, SJA1105_SIZE_DEVICE_ID);
1667 if (rc < 0)
1668 return rc;
1669
1670 if (device_id != priv->info->device_id) {
1671 dev_err(dev, "Expected device ID 0x%llx but read 0x%llx\n",
1672 priv->info->device_id, device_id);
1673 return -ENODEV;
1674 }
1675
1676 rc = sja1105_spi_send_packed_buf(priv, SPI_READ, regs->prod_id,
1677 prod_id, SJA1105_SIZE_DEVICE_ID);
1678 if (rc < 0)
1679 return rc;
1680
1681 sja1105_unpack(prod_id, &part_no, 19, 4, SJA1105_SIZE_DEVICE_ID);
1682
1683 if (part_no != priv->info->part_no) {
1684 dev_err(dev, "Expected part number 0x%llx but read 0x%llx\n",
1685 priv->info->part_no, part_no);
1686 return -ENODEV;
1687 }
1688
1689 return 0;
1690}
1691
1692static int sja1105_probe(struct spi_device *spi)
1693{
1694 struct device *dev = &spi->dev;
1695 struct sja1105_private *priv;
1696 struct dsa_switch *ds;
Vladimir Oltean227d07a2019-05-05 13:19:27 +03001697 int rc, i;
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001698
1699 if (!dev->of_node) {
1700 dev_err(dev, "No DTS bindings for SJA1105 driver\n");
1701 return -EINVAL;
1702 }
1703
1704 priv = devm_kzalloc(dev, sizeof(struct sja1105_private), GFP_KERNEL);
1705 if (!priv)
1706 return -ENOMEM;
1707
1708 /* Configure the optional reset pin and bring up switch */
1709 priv->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
1710 if (IS_ERR(priv->reset_gpio))
1711 dev_dbg(dev, "reset-gpios not defined, ignoring\n");
1712 else
1713 sja1105_hw_reset(priv->reset_gpio, 1, 1);
1714
1715 /* Populate our driver private structure (priv) based on
1716 * the device tree node that was probed (spi)
1717 */
1718 priv->spidev = spi;
1719 spi_set_drvdata(spi, priv);
1720
1721 /* Configure the SPI bus */
1722 spi->bits_per_word = 8;
1723 rc = spi_setup(spi);
1724 if (rc < 0) {
1725 dev_err(dev, "Could not init SPI\n");
1726 return rc;
1727 }
1728
1729 priv->info = of_device_get_match_data(dev);
1730
1731 /* Detect hardware device */
1732 rc = sja1105_check_device_id(priv);
1733 if (rc < 0) {
1734 dev_err(dev, "Device ID check failed: %d\n", rc);
1735 return rc;
1736 }
1737
1738 dev_info(dev, "Probed switch chip: %s\n", priv->info->name);
1739
1740 ds = dsa_switch_alloc(dev, SJA1105_NUM_PORTS);
1741 if (!ds)
1742 return -ENOMEM;
1743
1744 ds->ops = &sja1105_switch_ops;
1745 ds->priv = priv;
1746 priv->ds = ds;
1747
Vladimir Oltean227d07a2019-05-05 13:19:27 +03001748 /* Connections between dsa_port and sja1105_port */
1749 for (i = 0; i < SJA1105_NUM_PORTS; i++) {
1750 struct sja1105_port *sp = &priv->ports[i];
1751
1752 ds->ports[i].priv = sp;
1753 sp->dp = &ds->ports[i];
1754 }
1755 mutex_init(&priv->mgmt_lock);
1756
Vladimir Oltean8aa9ebc2019-05-02 23:23:30 +03001757 return dsa_register_switch(priv->ds);
1758}
1759
/* SPI device removal: unregister the DSA switch and release the memory
 * held by the shadow static configuration.
 */
static int sja1105_remove(struct spi_device *spi)
{
	struct sja1105_private *priv = spi_get_drvdata(spi);

	dsa_unregister_switch(priv->ds);
	sja1105_static_config_free(&priv->static_config);
	return 0;
}
1768
/* OF match table: one compatible string per supported switch variant, with
 * the per-variant info structure attached as match data (retrieved in probe
 * via of_device_get_match_data).
 */
static const struct of_device_id sja1105_dt_ids[] = {
	{ .compatible = "nxp,sja1105e", .data = &sja1105e_info },
	{ .compatible = "nxp,sja1105t", .data = &sja1105t_info },
	{ .compatible = "nxp,sja1105p", .data = &sja1105p_info },
	{ .compatible = "nxp,sja1105q", .data = &sja1105q_info },
	{ .compatible = "nxp,sja1105r", .data = &sja1105r_info },
	{ .compatible = "nxp,sja1105s", .data = &sja1105s_info },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
1779
/* SPI driver glue and module metadata */
static struct spi_driver sja1105_driver = {
	.driver = {
		.name  = "sja1105",
		.owner = THIS_MODULE,
		.of_match_table = of_match_ptr(sja1105_dt_ids),
	},
	.probe  = sja1105_probe,
	.remove = sja1105_remove,
};

module_spi_driver(sja1105_driver);

MODULE_AUTHOR("Vladimir Oltean <olteanv@gmail.com>");
MODULE_AUTHOR("Georg Waibel <georg.waibel@sensor-technik.de>");
MODULE_DESCRIPTION("SJA1105 Driver");
MODULE_LICENSE("GPL v2");