blob: 9526bdf2a34a3ece9ca3b80ff45e3f634efa7183 [file] [log] [blame]
Andrew Lunn83c0afa2016-06-04 21:17:07 +02001/*
2 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
3 * Copyright (c) 2008-2009 Marvell Semiconductor
4 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
5 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 */
12
13#include <linux/device.h>
14#include <linux/err.h>
15#include <linux/list.h>
16#include <linux/slab.h>
17#include <linux/rtnetlink.h>
18#include <net/dsa.h>
19#include <linux/of.h>
20#include <linux/of_net.h>
21#include "dsa_priv.h"
22
23static LIST_HEAD(dsa_switch_trees);
24static DEFINE_MUTEX(dsa2_mutex);
25
26static struct dsa_switch_tree *dsa_get_dst(u32 tree)
27{
28 struct dsa_switch_tree *dst;
29
30 list_for_each_entry(dst, &dsa_switch_trees, list)
Nikita Yushchenko7a99cd62016-11-28 09:48:48 +030031 if (dst->tree == tree) {
32 kref_get(&dst->refcount);
Andrew Lunn83c0afa2016-06-04 21:17:07 +020033 return dst;
Nikita Yushchenko7a99cd62016-11-28 09:48:48 +030034 }
Andrew Lunn83c0afa2016-06-04 21:17:07 +020035 return NULL;
36}
37
38static void dsa_free_dst(struct kref *ref)
39{
40 struct dsa_switch_tree *dst = container_of(ref, struct dsa_switch_tree,
41 refcount);
42
43 list_del(&dst->list);
44 kfree(dst);
45}
46
/* Drop one reference on @dst; dsa_free_dst() runs when the last ref goes. */
static void dsa_put_dst(struct dsa_switch_tree *dst)
{
	kref_put(&dst->refcount, dsa_free_dst);
}
51
52static struct dsa_switch_tree *dsa_add_dst(u32 tree)
53{
54 struct dsa_switch_tree *dst;
55
56 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
57 if (!dst)
58 return NULL;
59 dst->tree = tree;
60 dst->cpu_switch = -1;
61 INIT_LIST_HEAD(&dst->list);
62 list_add_tail(&dsa_switch_trees, &dst->list);
63 kref_init(&dst->refcount);
64
65 return dst;
66}
67
/* Record switch @ds at position @index in tree @dst, taking a tree ref
 * that dsa_dst_del_ds() later drops.
 */
static void dsa_dst_add_ds(struct dsa_switch_tree *dst,
			   struct dsa_switch *ds, u32 index)
{
	kref_get(&dst->refcount);
	dst->ds[index] = ds;
}
74
/* Remove switch @ds from slot @index of tree @dst and drop the reference
 * taken by dsa_dst_add_ds(); may free the tree if it was the last ref.
 */
static void dsa_dst_del_ds(struct dsa_switch_tree *dst,
			   struct dsa_switch *ds, u32 index)
{
	dst->ds[index] = NULL;
	kref_put(&dst->refcount, dsa_free_dst);
}
81
82static bool dsa_port_is_dsa(struct device_node *port)
83{
Vivien Didelot9f914842017-01-09 18:13:51 -050084 return !!of_parse_phandle(port, "link", 0);
Andrew Lunn83c0afa2016-06-04 21:17:07 +020085}
86
87static bool dsa_port_is_cpu(struct device_node *port)
88{
Vivien Didelot9f914842017-01-09 18:13:51 -050089 return !!of_parse_phandle(port, "ethernet", 0);
Andrew Lunn83c0afa2016-06-04 21:17:07 +020090}
91
92static bool dsa_ds_find_port(struct dsa_switch *ds,
93 struct device_node *port)
94{
95 u32 index;
96
97 for (index = 0; index < DSA_MAX_PORTS; index++)
98 if (ds->ports[index].dn == port)
99 return true;
100 return false;
101}
102
103static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst,
104 struct device_node *port)
105{
106 struct dsa_switch *ds;
107 u32 index;
108
109 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
110 ds = dst->ds[index];
111 if (!ds)
112 continue;
113
114 if (dsa_ds_find_port(ds, port))
115 return ds;
116 }
117
118 return NULL;
119}
120
121static int dsa_port_complete(struct dsa_switch_tree *dst,
122 struct dsa_switch *src_ds,
123 struct device_node *port,
124 u32 src_port)
125{
126 struct device_node *link;
127 int index;
128 struct dsa_switch *dst_ds;
129
130 for (index = 0;; index++) {
131 link = of_parse_phandle(port, "link", index);
132 if (!link)
133 break;
134
135 dst_ds = dsa_dst_find_port(dst, link);
136 of_node_put(link);
137
138 if (!dst_ds)
139 return 1;
140
141 src_ds->rtable[dst_ds->index] = src_port;
142 }
143
144 return 0;
145}
146
147/* A switch is complete if all the DSA ports phandles point to ports
148 * known in the tree. A return value of 1 means the tree is not
149 * complete. This is not an error condition. A value of 0 is
150 * success.
151 */
152static int dsa_ds_complete(struct dsa_switch_tree *dst, struct dsa_switch *ds)
153{
154 struct device_node *port;
155 u32 index;
156 int err;
157
158 for (index = 0; index < DSA_MAX_PORTS; index++) {
159 port = ds->ports[index].dn;
160 if (!port)
161 continue;
162
163 if (!dsa_port_is_dsa(port))
164 continue;
165
166 err = dsa_port_complete(dst, ds, port, index);
167 if (err != 0)
168 return err;
169
170 ds->dsa_port_mask |= BIT(index);
171 }
172
173 return 0;
174}
175
176/* A tree is complete if all the DSA ports phandles point to ports
177 * known in the tree. A return value of 1 means the tree is not
178 * complete. This is not an error condition. A value of 0 is
179 * success.
180 */
181static int dsa_dst_complete(struct dsa_switch_tree *dst)
182{
183 struct dsa_switch *ds;
184 u32 index;
185 int err;
186
187 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
188 ds = dst->ds[index];
189 if (!ds)
190 continue;
191
192 err = dsa_ds_complete(dst, ds);
193 if (err != 0)
194 return err;
195 }
196
197 return 0;
198}
199
200static int dsa_dsa_port_apply(struct device_node *port, u32 index,
201 struct dsa_switch *ds)
202{
203 int err;
204
205 err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
206 if (err) {
207 dev_warn(ds->dev, "Failed to setup dsa port %d: %d\n",
208 index, err);
209 return err;
210 }
211
212 return 0;
213}
214
/* Tear down a single DSA port; @index and @ds are unused here but keep
 * the signature symmetric with dsa_dsa_port_apply().
 */
static void dsa_dsa_port_unapply(struct device_node *port, u32 index,
				 struct dsa_switch *ds)
{
	dsa_cpu_dsa_destroy(port);
}
220
221static int dsa_cpu_port_apply(struct device_node *port, u32 index,
222 struct dsa_switch *ds)
223{
224 int err;
225
226 err = dsa_cpu_dsa_setup(ds, ds->dev, port, index);
227 if (err) {
228 dev_warn(ds->dev, "Failed to setup cpu port %d: %d\n",
229 index, err);
230 return err;
231 }
232
233 ds->cpu_port_mask |= BIT(index);
234
235 return 0;
236}
237
238static void dsa_cpu_port_unapply(struct device_node *port, u32 index,
239 struct dsa_switch *ds)
240{
241 dsa_cpu_dsa_destroy(port);
242 ds->cpu_port_mask &= ~BIT(index);
243
244}
245
246static int dsa_user_port_apply(struct device_node *port, u32 index,
247 struct dsa_switch *ds)
248{
249 const char *name;
250 int err;
251
252 name = of_get_property(port, "label", NULL);
Vivien Didelot9f914842017-01-09 18:13:51 -0500253 if (!name)
254 name = "eth%d";
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200255
256 err = dsa_slave_create(ds, ds->dev, index, name);
257 if (err) {
258 dev_warn(ds->dev, "Failed to create slave %d: %d\n",
259 index, err);
260 return err;
261 }
262
263 return 0;
264}
265
266static void dsa_user_port_unapply(struct device_node *port, u32 index,
267 struct dsa_switch *ds)
268{
269 if (ds->ports[index].netdev) {
270 dsa_slave_destroy(ds->ports[index].netdev);
271 ds->ports[index].netdev = NULL;
Florian Fainelli6e830d82016-06-07 16:32:39 -0700272 ds->enabled_port_mask &= ~(1 << index);
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200273 }
274}
275
/* Bring up one switch of the tree: run the driver's setup, program the
 * master MAC address if supported, register the slave MDIO bus when the
 * driver provides PHY access, then apply each port in turn.
 *
 * Statement order matters here: phys_mii_mask must be set before both
 * ops->setup() and the MDIO bus registration (see comment below).
 */
static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
{
	struct device_node *port;
	u32 index;
	int err;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask = ds->enabled_port_mask;

	err = ds->ops->setup(ds);
	if (err < 0)
		return err;

	/* set_addr is optional; propagate the master's MAC when provided */
	if (ds->ops->set_addr) {
		err = ds->ops->set_addr(ds, dst->master_netdev->dev_addr);
		if (err < 0)
			return err;
	}

	/* Only drivers with direct PHY access need a slave MII bus; skip
	 * if one was already allocated (e.g. by the driver itself).
	 */
	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = devm_mdiobus_alloc(ds->dev);
		if (!ds->slave_mii_bus)
			return -ENOMEM;

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			return err;
	}

	for (index = 0; index < DSA_MAX_PORTS; index++) {
		port = ds->ports[index].dn;
		if (!port)
			continue;

		if (dsa_port_is_dsa(port)) {
			err = dsa_dsa_port_apply(port, index, ds);
			if (err)
				return err;
			continue;
		}

		if (dsa_port_is_cpu(port)) {
			err = dsa_cpu_port_apply(port, index, ds);
			if (err)
				return err;
			continue;
		}

		/* A failing user port is tolerated (continue, not return):
		 * the rest of the switch keeps working without that slave.
		 */
		err = dsa_user_port_apply(port, index, ds);
		if (err)
			continue;
	}

	return 0;
}
337
338static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
339{
340 struct device_node *port;
341 u32 index;
342
343 for (index = 0; index < DSA_MAX_PORTS; index++) {
344 port = ds->ports[index].dn;
345 if (!port)
346 continue;
347
348 if (dsa_port_is_dsa(port)) {
349 dsa_dsa_port_unapply(port, index, ds);
350 continue;
351 }
352
353 if (dsa_port_is_cpu(port)) {
354 dsa_cpu_port_unapply(port, index, ds);
355 continue;
356 }
357
358 dsa_user_port_unapply(port, index, ds);
359 }
Florian Fainelli1eb59442016-06-07 16:32:40 -0700360
Vivien Didelot9d490b42016-08-23 12:38:56 -0400361 if (ds->slave_mii_bus && ds->ops->phy_read)
Florian Fainelli1eb59442016-06-07 16:32:40 -0700362 mdiobus_unregister(ds->slave_mii_bus);
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200363}
364
/* Apply the whole tree: bring up every registered switch, set up ethtool
 * ops on the CPU switch, then publish the tree on the master netdev so
 * the receive path starts using the tagger.
 *
 * NOTE(review): on mid-loop failure, switches already applied are not
 * rolled back here — the caller is expected to run dsa_dst_unapply().
 */
static int dsa_dst_apply(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	u32 index;
	int err;

	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
		ds = dst->ds[index];
		if (!ds)
			continue;

		err = dsa_ds_apply(dst, ds);
		if (err)
			return err;
	}

	err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
	if (err)
		return err;

	/* If we use a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point on get
	 * sent to the tag format's receive function.
	 */
	wmb();
	dst->master_netdev->dsa_ptr = (void *)dst;
	dst->applied = true;

	return 0;
}
395
/* Unapply the whole tree: unpublish it from the master netdev first so
 * no new packets reach the tagger, then tear down every switch and
 * restore the CPU port's ethtool ops. No-op if the tree was never applied.
 */
static void dsa_dst_unapply(struct dsa_switch_tree *dst)
{
	struct dsa_switch *ds;
	u32 index;

	if (!dst->applied)
		return;

	dst->master_netdev->dsa_ptr = NULL;

	/* If we used a tagging format that doesn't have an ethertype
	 * field, make sure that all packets from this point get sent
	 * without the tag and go through the regular receive path.
	 */
	wmb();

	for (index = 0; index < DSA_MAX_SWITCHES; index++) {
		ds = dst->ds[index];
		if (!ds)
			continue;

		dsa_ds_unapply(dst, ds);
	}

	dsa_cpu_port_ethtool_restore(dst->ds[0]);

	pr_info("DSA: tree %d unapplied\n", dst->tree);
	dst->applied = false;
}
425
426static int dsa_cpu_parse(struct device_node *port, u32 index,
427 struct dsa_switch_tree *dst,
428 struct dsa_switch *ds)
429{
Andrew Lunn7b314362016-08-22 16:01:01 +0200430 enum dsa_tag_protocol tag_protocol;
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200431 struct net_device *ethernet_dev;
432 struct device_node *ethernet;
433
434 ethernet = of_parse_phandle(port, "ethernet", 0);
435 if (!ethernet)
436 return -EINVAL;
437
438 ethernet_dev = of_find_net_device_by_node(ethernet);
439 if (!ethernet_dev)
440 return -EPROBE_DEFER;
441
442 if (!ds->master_netdev)
443 ds->master_netdev = ethernet_dev;
444
445 if (!dst->master_netdev)
446 dst->master_netdev = ethernet_dev;
447
448 if (dst->cpu_switch == -1) {
449 dst->cpu_switch = ds->index;
450 dst->cpu_port = index;
451 }
452
Vivien Didelot9d490b42016-08-23 12:38:56 -0400453 tag_protocol = ds->ops->get_tag_protocol(ds);
Andrew Lunn7b314362016-08-22 16:01:01 +0200454 dst->tag_ops = dsa_resolve_tag_protocol(tag_protocol);
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200455 if (IS_ERR(dst->tag_ops)) {
456 dev_warn(ds->dev, "No tagger for this switch\n");
457 return PTR_ERR(dst->tag_ops);
458 }
459
460 dst->rcv = dst->tag_ops->rcv;
461
462 return 0;
463}
464
465static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds)
466{
467 struct device_node *port;
468 u32 index;
469 int err;
470
471 for (index = 0; index < DSA_MAX_PORTS; index++) {
472 port = ds->ports[index].dn;
473 if (!port)
474 continue;
475
476 if (dsa_port_is_cpu(port)) {
477 err = dsa_cpu_parse(port, index, dst, ds);
478 if (err)
479 return err;
480 }
481 }
482
483 pr_info("DSA: switch %d %d parsed\n", dst->tree, ds->index);
484
485 return 0;
486}
487
488static int dsa_dst_parse(struct dsa_switch_tree *dst)
489{
490 struct dsa_switch *ds;
491 u32 index;
492 int err;
493
494 for (index = 0; index < DSA_MAX_SWITCHES; index++) {
495 ds = dst->ds[index];
496 if (!ds)
497 continue;
498
499 err = dsa_ds_parse(dst, ds);
500 if (err)
501 return err;
502 }
503
504 if (!dst->master_netdev) {
505 pr_warn("Tree has no master device\n");
506 return -EINVAL;
507 }
508
509 pr_info("DSA: tree %d parsed\n", dst->tree);
510
511 return 0;
512}
513
514static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
515{
516 struct device_node *port;
517 int err;
518 u32 reg;
519
520 for_each_available_child_of_node(ports, port) {
521 err = of_property_read_u32(port, "reg", &reg);
522 if (err)
523 return err;
524
525 if (reg >= DSA_MAX_PORTS)
526 return -EINVAL;
527
528 ds->ports[reg].dn = port;
Florian Fainelli6e830d82016-06-07 16:32:39 -0700529
Vivien Didelot9d490b42016-08-23 12:38:56 -0400530 /* Initialize enabled_port_mask now for ops->setup()
Florian Fainelli6e830d82016-06-07 16:32:39 -0700531 * to have access to a correct value, just like what
532 * net/dsa/dsa.c::dsa_switch_setup_one does.
533 */
534 if (!dsa_port_is_cpu(port))
535 ds->enabled_port_mask |= 1 << reg;
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200536 }
537
538 return 0;
539}
540
541static int dsa_parse_member(struct device_node *np, u32 *tree, u32 *index)
542{
543 int err;
544
545 *tree = *index = 0;
546
547 err = of_property_read_u32_index(np, "dsa,member", 0, tree);
548 if (err) {
549 /* Does not exist, but it is optional */
550 if (err == -EINVAL)
551 return 0;
552 return err;
553 }
554
555 err = of_property_read_u32_index(np, "dsa,member", 1, index);
556 if (err)
557 return err;
558
559 if (*index >= DSA_MAX_SWITCHES)
560 return -EINVAL;
561
562 return 0;
563}
564
565static struct device_node *dsa_get_ports(struct dsa_switch *ds,
566 struct device_node *np)
567{
568 struct device_node *ports;
569
570 ports = of_get_child_by_name(np, "ports");
571 if (!ports) {
572 dev_err(ds->dev, "no ports child node found\n");
573 return ERR_PTR(-EINVAL);
574 }
575
576 return ports;
577}
578
579static int _dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
580{
581 struct device_node *ports = dsa_get_ports(ds, np);
582 struct dsa_switch_tree *dst;
583 u32 tree, index;
Vivien Didelotd3902382016-07-06 20:03:54 -0400584 int i, err;
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200585
586 err = dsa_parse_member(np, &tree, &index);
587 if (err)
588 return err;
589
590 if (IS_ERR(ports))
591 return PTR_ERR(ports);
592
593 err = dsa_parse_ports_dn(ports, ds);
594 if (err)
595 return err;
596
597 dst = dsa_get_dst(tree);
598 if (!dst) {
599 dst = dsa_add_dst(tree);
600 if (!dst)
601 return -ENOMEM;
602 }
603
604 if (dst->ds[index]) {
605 err = -EBUSY;
606 goto out;
607 }
608
609 ds->dst = dst;
610 ds->index = index;
Vivien Didelotd3902382016-07-06 20:03:54 -0400611
612 /* Initialize the routing table */
613 for (i = 0; i < DSA_MAX_SWITCHES; ++i)
614 ds->rtable[i] = DSA_RTABLE_NONE;
615
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200616 dsa_dst_add_ds(dst, ds, index);
617
618 err = dsa_dst_complete(dst);
619 if (err < 0)
620 goto out_del_dst;
621
622 if (err == 1) {
623 /* Not all switches registered yet */
624 err = 0;
625 goto out;
626 }
627
628 if (dst->applied) {
629 pr_info("DSA: Disjoint trees?\n");
630 return -EINVAL;
631 }
632
633 err = dsa_dst_parse(dst);
Volodymyr Bendiuga5e6eb452017-01-05 11:10:13 +0100634 if (err) {
635 if (err == -EPROBE_DEFER) {
636 dsa_dst_del_ds(dst, ds, ds->index);
637 return err;
638 }
639
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200640 goto out_del_dst;
Volodymyr Bendiuga5e6eb452017-01-05 11:10:13 +0100641 }
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200642
643 err = dsa_dst_apply(dst);
644 if (err) {
645 dsa_dst_unapply(dst);
646 goto out_del_dst;
647 }
648
649 dsa_put_dst(dst);
650 return 0;
651
652out_del_dst:
653 dsa_dst_del_ds(dst, ds, ds->index);
654out:
655 dsa_put_dst(dst);
656
657 return err;
658}
659
/* Register a switch described by device tree node @np with the DSA core,
 * serialized with other DSA registrations/unregistrations by dsa2_mutex.
 */
int dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
{
	int err;

	mutex_lock(&dsa2_mutex);
	err = _dsa_register_switch(ds, np);
	mutex_unlock(&dsa2_mutex);

	return err;
}
EXPORT_SYMBOL_GPL(dsa_register_switch);
671
Wei Yongjun85c22ba2016-07-12 15:24:10 +0000672static void _dsa_unregister_switch(struct dsa_switch *ds)
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200673{
674 struct dsa_switch_tree *dst = ds->dst;
675
676 dsa_dst_unapply(dst);
677
678 dsa_dst_del_ds(dst, ds, ds->index);
679}
680
/* Unregister a switch from the DSA core, serialized by dsa2_mutex. */
void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	_dsa_unregister_switch(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);