blob: dcad3100b164637fd581d70ba20e4b31ae6e8a6b [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Andrew Lunn83c0afa2016-06-04 21:17:07 +02002/*
3 * net/dsa/dsa2.c - Hardware switch handling, binding version 2
4 * Copyright (c) 2008-2009 Marvell Semiconductor
5 * Copyright (c) 2013 Florian Fainelli <florian@openwrt.org>
6 * Copyright (c) 2016 Andrew Lunn <andrew@lunn.ch>
Andrew Lunn83c0afa2016-06-04 21:17:07 +02007 */
8
9#include <linux/device.h>
10#include <linux/err.h>
11#include <linux/list.h>
Andrew Lunnc6e970a2017-03-28 23:45:06 +020012#include <linux/netdevice.h>
Andrew Lunn83c0afa2016-06-04 21:17:07 +020013#include <linux/slab.h>
14#include <linux/rtnetlink.h>
Andrew Lunn83c0afa2016-06-04 21:17:07 +020015#include <linux/of.h>
16#include <linux/of_net.h>
Jiri Pirko402f99e52019-03-24 11:14:26 +010017#include <net/devlink.h>
Vivien Didelotea5dd342017-05-17 15:46:03 -040018
Andrew Lunn83c0afa2016-06-04 21:17:07 +020019#include "dsa_priv.h"
20
Andrew Lunn83c0afa2016-06-04 21:17:07 +020021static DEFINE_MUTEX(dsa2_mutex);
Vladimir Olteanbff33f72020-03-27 21:55:43 +020022LIST_HEAD(dsa_tree_list);
Andrew Lunn83c0afa2016-06-04 21:17:07 +020023
Vladimir Olteanf5e165e2021-08-19 20:55:00 +030024/* Track the bridges with forwarding offload enabled */
25static unsigned long dsa_fwd_offloading_bridges;
26
Tobias Waldekranz058102a2021-01-13 09:42:53 +010027/**
Vladimir Oltean886f8e22021-01-29 03:00:04 +020028 * dsa_tree_notify - Execute code for all switches in a DSA switch tree.
29 * @dst: collection of struct dsa_switch devices to notify.
30 * @e: event, must be of type DSA_NOTIFIER_*
31 * @v: event-specific value.
32 *
33 * Given a struct dsa_switch_tree, this can be used to run a function once for
34 * each member DSA switch. The other alternative of traversing the tree is only
35 * through its ports list, which does not uniquely list the switches.
36 */
37int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v)
38{
39 struct raw_notifier_head *nh = &dst->nh;
40 int err;
41
42 err = raw_notifier_call_chain(nh, e, v);
43
44 return notifier_to_errno(err);
45}
46
47/**
48 * dsa_broadcast - Notify all DSA trees in the system.
49 * @e: event, must be of type DSA_NOTIFIER_*
50 * @v: event-specific value.
51 *
52 * Can be used to notify the switching fabric of events such as cross-chip
53 * bridging between disjoint trees (such as islands of tagger-compatible
54 * switches bridged by an incompatible middle switch).
Vladimir Oltean724395f2021-08-11 16:46:06 +030055 *
56 * WARNING: this function is not reliable during probe time, because probing
57 * between trees is asynchronous and not all DSA trees might have probed.
Vladimir Oltean886f8e22021-01-29 03:00:04 +020058 */
59int dsa_broadcast(unsigned long e, void *v)
60{
61 struct dsa_switch_tree *dst;
62 int err = 0;
63
64 list_for_each_entry(dst, &dsa_tree_list, list) {
65 err = dsa_tree_notify(dst, e, v);
66 if (err)
67 break;
68 }
69
70 return err;
71}
72
73/**
Tobias Waldekranz058102a2021-01-13 09:42:53 +010074 * dsa_lag_map() - Map LAG netdev to a linear LAG ID
75 * @dst: Tree in which to record the mapping.
76 * @lag: Netdev that is to be mapped to an ID.
77 *
78 * dsa_lag_id/dsa_lag_dev can then be used to translate between the
79 * two spaces. The size of the mapping space is determined by the
80 * driver by setting ds->num_lag_ids. It is perfectly legal to leave
81 * it unset if it is not needed, in which case these functions become
82 * no-ops.
83 */
84void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag)
85{
86 unsigned int id;
87
88 if (dsa_lag_id(dst, lag) >= 0)
89 /* Already mapped */
90 return;
91
92 for (id = 0; id < dst->lags_len; id++) {
93 if (!dsa_lag_dev(dst, id)) {
94 dst->lags[id] = lag;
95 return;
96 }
97 }
98
99 /* No IDs left, which is OK. Some drivers do not need it. The
100 * ones that do, e.g. mv88e6xxx, will discover that dsa_lag_id
101 * returns an error for this device when joining the LAG. The
102 * driver can then return -EOPNOTSUPP back to DSA, which will
103 * fall back to a software LAG.
104 */
105}
106
107/**
108 * dsa_lag_unmap() - Remove a LAG ID mapping
109 * @dst: Tree in which the mapping is recorded.
110 * @lag: Netdev that was mapped.
111 *
112 * As there may be multiple users of the mapping, it is only removed
113 * if there are no other references to it.
114 */
115void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag)
116{
117 struct dsa_port *dp;
118 unsigned int id;
119
120 dsa_lag_foreach_port(dp, dst, lag)
121 /* There are remaining users of this mapping */
122 return;
123
124 dsa_lags_foreach_id(id, dst) {
125 if (dsa_lag_dev(dst, id) == lag) {
126 dst->lags[id] = NULL;
127 break;
128 }
129 }
130}
131
Vladimir Olteand3eed0e2021-12-06 18:57:56 +0200132struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
133 const struct net_device *br)
134{
135 struct dsa_port *dp;
136
137 list_for_each_entry(dp, &dst->ports, list)
138 if (dsa_port_bridge_dev_get(dp) == br)
139 return dp->bridge;
140
141 return NULL;
142}
143
Vladimir Olteanf5e165e2021-08-19 20:55:00 +0300144static int dsa_bridge_num_find(const struct net_device *bridge_dev)
145{
146 struct dsa_switch_tree *dst;
Vladimir Olteanf5e165e2021-08-19 20:55:00 +0300147
Vladimir Olteand3eed0e2021-12-06 18:57:56 +0200148 list_for_each_entry(dst, &dsa_tree_list, list) {
149 struct dsa_bridge *bridge;
150
151 bridge = dsa_tree_bridge_find(dst, bridge_dev);
152 if (bridge)
153 return bridge->num;
154 }
Vladimir Olteanf5e165e2021-08-19 20:55:00 +0300155
Vladimir Oltean3f9bb032021-12-06 18:57:47 +0200156 return 0;
Vladimir Olteanf5e165e2021-08-19 20:55:00 +0300157}
158
Vladimir Oltean3f9bb032021-12-06 18:57:47 +0200159unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max)
Vladimir Olteanf5e165e2021-08-19 20:55:00 +0300160{
Vladimir Oltean3f9bb032021-12-06 18:57:47 +0200161 unsigned int bridge_num = dsa_bridge_num_find(bridge_dev);
Vladimir Olteanf5e165e2021-08-19 20:55:00 +0300162
Vladimir Olteand3eed0e2021-12-06 18:57:56 +0200163 /* Switches without FDB isolation support don't get unique
164 * bridge numbering
165 */
166 if (!max)
167 return 0;
168
Vladimir Oltean3f9bb032021-12-06 18:57:47 +0200169 if (!bridge_num) {
Vladimir Oltean947c8742021-12-06 18:57:48 +0200170 /* First port that requests FDB isolation or TX forwarding
171 * offload for this bridge
172 */
Vladimir Oltean3f9bb032021-12-06 18:57:47 +0200173 bridge_num = find_next_zero_bit(&dsa_fwd_offloading_bridges,
174 DSA_MAX_NUM_OFFLOADING_BRIDGES,
175 1);
Vladimir Olteanf5e165e2021-08-19 20:55:00 +0300176 if (bridge_num >= max)
Vladimir Oltean3f9bb032021-12-06 18:57:47 +0200177 return 0;
Vladimir Olteanf5e165e2021-08-19 20:55:00 +0300178
179 set_bit(bridge_num, &dsa_fwd_offloading_bridges);
180 }
181
182 return bridge_num;
183}
184
Vladimir Oltean3f9bb032021-12-06 18:57:47 +0200185void dsa_bridge_num_put(const struct net_device *bridge_dev,
186 unsigned int bridge_num)
Vladimir Olteanf5e165e2021-08-19 20:55:00 +0300187{
Vladimir Olteand3eed0e2021-12-06 18:57:56 +0200188 /* Since we refcount bridges, we know that when we call this function
189 * it is no longer in use, so we can just go ahead and remove it from
190 * the bit mask.
Vladimir Olteanf5e165e2021-08-19 20:55:00 +0300191 */
Vladimir Olteand3eed0e2021-12-06 18:57:56 +0200192 clear_bit(bridge_num, &dsa_fwd_offloading_bridges);
Vladimir Olteanf5e165e2021-08-19 20:55:00 +0300193}
194
Vladimir Oltean3b7bc1f2020-05-10 19:37:42 +0300195struct dsa_switch *dsa_switch_find(int tree_index, int sw_index)
196{
197 struct dsa_switch_tree *dst;
198 struct dsa_port *dp;
199
200 list_for_each_entry(dst, &dsa_tree_list, list) {
201 if (dst->index != tree_index)
202 continue;
203
204 list_for_each_entry(dp, &dst->ports, list) {
205 if (dp->ds->index != sw_index)
206 continue;
207
208 return dp->ds;
209 }
210 }
211
212 return NULL;
213}
214EXPORT_SYMBOL_GPL(dsa_switch_find);
215
Vivien Didelot1ca28ec2017-11-03 19:05:24 -0400216static struct dsa_switch_tree *dsa_tree_find(int index)
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200217{
218 struct dsa_switch_tree *dst;
219
Vivien Didelot1ca28ec2017-11-03 19:05:24 -0400220 list_for_each_entry(dst, &dsa_tree_list, list)
Vivien Didelot8e5bf972017-11-03 19:05:22 -0400221 if (dst->index == index)
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200222 return dst;
Vivien Didelot8e5bf972017-11-03 19:05:22 -0400223
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200224 return NULL;
225}
226
Vivien Didelot1ca28ec2017-11-03 19:05:24 -0400227static struct dsa_switch_tree *dsa_tree_alloc(int index)
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200228{
229 struct dsa_switch_tree *dst;
230
231 dst = kzalloc(sizeof(*dst), GFP_KERNEL);
232 if (!dst)
233 return NULL;
Vivien Didelot1ca28ec2017-11-03 19:05:24 -0400234
Vivien Didelot49463b72017-11-03 19:05:21 -0400235 dst->index = index;
Vivien Didelot1ca28ec2017-11-03 19:05:24 -0400236
Vivien Didelotc5f51762019-10-30 22:09:13 -0400237 INIT_LIST_HEAD(&dst->rtable);
238
Vivien Didelotab8ccae2019-10-21 16:51:16 -0400239 INIT_LIST_HEAD(&dst->ports);
240
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200241 INIT_LIST_HEAD(&dst->list);
Vivien Didelot50c7d2ba2019-10-18 17:02:46 -0400242 list_add_tail(&dst->list, &dsa_tree_list);
Vivien Didelot8e5bf972017-11-03 19:05:22 -0400243
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200244 kref_init(&dst->refcount);
245
246 return dst;
247}
248
Vivien Didelot65254102017-11-03 19:05:23 -0400249static void dsa_tree_free(struct dsa_switch_tree *dst)
250{
Vladimir Oltean7f297312021-12-14 03:45:36 +0200251 if (dst->tag_ops)
Vladimir Oltean357f2032021-01-29 03:00:05 +0200252 dsa_tag_driver_put(dst->tag_ops);
Vivien Didelot65254102017-11-03 19:05:23 -0400253 list_del(&dst->list);
254 kfree(dst);
255}
256
/* Take an extra reference on @dst (NULL-safe) and return it for chaining */
static struct dsa_switch_tree *dsa_tree_get(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_get(&dst->refcount);

	return dst;
}
264
/* Get a reference to the tree with the given index, creating it if it does
 * not exist yet.
 */
static struct dsa_switch_tree *dsa_tree_touch(int index)
{
	struct dsa_switch_tree *dst;

	dst = dsa_tree_find(index);
	if (!dst)
		return dsa_tree_alloc(index);

	return dsa_tree_get(dst);
}
275
276static void dsa_tree_release(struct kref *ref)
277{
278 struct dsa_switch_tree *dst;
279
280 dst = container_of(ref, struct dsa_switch_tree, refcount);
281
282 dsa_tree_free(dst);
283}
284
/* Drop a reference on @dst (NULL-safe); frees the tree on the last put */
static void dsa_tree_put(struct dsa_switch_tree *dst)
{
	if (dst)
		kref_put(&dst->refcount, dsa_tree_release);
}
290
Vivien Didelotf163da82017-11-06 16:11:49 -0500291static struct dsa_port *dsa_tree_find_port_by_node(struct dsa_switch_tree *dst,
292 struct device_node *dn)
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200293{
Vivien Didelotf163da82017-11-06 16:11:49 -0500294 struct dsa_port *dp;
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200295
Vivien Didelot764b7e62019-10-21 16:51:21 -0400296 list_for_each_entry(dp, &dst->ports, list)
297 if (dp->dn == dn)
298 return dp;
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200299
300 return NULL;
301}
302
Ben Dooks (Codethink)4e2ce6e2019-12-17 11:20:38 +0000303static struct dsa_link *dsa_link_touch(struct dsa_port *dp,
304 struct dsa_port *link_dp)
Vivien Didelotc5f51762019-10-30 22:09:13 -0400305{
306 struct dsa_switch *ds = dp->ds;
307 struct dsa_switch_tree *dst;
308 struct dsa_link *dl;
309
310 dst = ds->dst;
311
312 list_for_each_entry(dl, &dst->rtable, list)
313 if (dl->dp == dp && dl->link_dp == link_dp)
314 return dl;
315
316 dl = kzalloc(sizeof(*dl), GFP_KERNEL);
317 if (!dl)
318 return NULL;
319
320 dl->dp = dp;
321 dl->link_dp = link_dp;
322
323 INIT_LIST_HEAD(&dl->list);
324 list_add_tail(&dl->list, &dst->rtable);
325
326 return dl;
327}
328
/* Populate the tree's routing table with one dsa_link per "link" phandle of
 * DSA port @dp. Returns false if a linked port has not been parsed into the
 * tree yet (the routing table is then incomplete and setup must wait), or on
 * allocation failure.
 */
static bool dsa_port_setup_routing_table(struct dsa_port *dp)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	struct device_node *dn = dp->dn;
	struct of_phandle_iterator it;
	struct dsa_port *link_dp;
	struct dsa_link *dl;
	int err;

	of_for_each_phandle(&it, err, dn, "link", NULL, 0) {
		link_dp = dsa_tree_find_port_by_node(dst, it.node);
		if (!link_dp) {
			/* Drop the iterator's node reference on early exit */
			of_node_put(it.node);
			return false;
		}

		dl = dsa_link_touch(dp, link_dp);
		if (!dl) {
			of_node_put(it.node);
			return false;
		}
	}

	return true;
}
355
Vivien Didelot3774ecd2019-10-30 22:09:15 -0400356static bool dsa_tree_setup_routing_table(struct dsa_switch_tree *dst)
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200357{
Vivien Didelot34c09a82017-11-06 16:11:51 -0500358 bool complete = true;
359 struct dsa_port *dp;
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200360
Vivien Didelot86bfb2c2019-10-21 16:51:20 -0400361 list_for_each_entry(dp, &dst->ports, list) {
Vivien Didelot3774ecd2019-10-30 22:09:15 -0400362 if (dsa_port_is_dsa(dp)) {
Vivien Didelot34c09a82017-11-06 16:11:51 -0500363 complete = dsa_port_setup_routing_table(dp);
364 if (!complete)
365 break;
366 }
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200367 }
368
Vivien Didelot34c09a82017-11-06 16:11:51 -0500369 return complete;
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200370}
371
Vivien Didelotf0704642017-11-06 16:11:44 -0500372static struct dsa_port *dsa_tree_find_first_cpu(struct dsa_switch_tree *dst)
373{
Vivien Didelotf0704642017-11-06 16:11:44 -0500374 struct dsa_port *dp;
Vivien Didelotf0704642017-11-06 16:11:44 -0500375
Vivien Didelotc0b73622019-10-21 16:51:23 -0400376 list_for_each_entry(dp, &dst->ports, list)
377 if (dsa_port_is_cpu(dp))
378 return dp;
Vivien Didelotf0704642017-11-06 16:11:44 -0500379
380 return NULL;
381}
382
Vladimir Oltean2c0b0322021-08-04 16:54:30 +0300383/* Assign the default CPU port (the first one in the tree) to all ports of the
384 * fabric which don't already have one as part of their own switch.
385 */
Vivien Didelotf0704642017-11-06 16:11:44 -0500386static int dsa_tree_setup_default_cpu(struct dsa_switch_tree *dst)
387{
Vivien Didelotda4561c2019-10-21 16:51:24 -0400388 struct dsa_port *cpu_dp, *dp;
Vivien Didelotf0704642017-11-06 16:11:44 -0500389
Vivien Didelotda4561c2019-10-21 16:51:24 -0400390 cpu_dp = dsa_tree_find_first_cpu(dst);
391 if (!cpu_dp) {
392 pr_err("DSA: tree %d has no CPU port\n", dst->index);
Vivien Didelotf0704642017-11-06 16:11:44 -0500393 return -EINVAL;
394 }
395
Vladimir Oltean2c0b0322021-08-04 16:54:30 +0300396 list_for_each_entry(dp, &dst->ports, list) {
397 if (dp->cpu_dp)
398 continue;
399
Vivien Didelotda4561c2019-10-21 16:51:24 -0400400 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
401 dp->cpu_dp = cpu_dp;
Vladimir Oltean2c0b0322021-08-04 16:54:30 +0300402 }
Vivien Didelotf0704642017-11-06 16:11:44 -0500403
404 return 0;
405}
406
Vladimir Oltean2c0b0322021-08-04 16:54:30 +0300407/* Perform initial assignment of CPU ports to user ports and DSA links in the
408 * fabric, giving preference to CPU ports local to each switch. Default to
409 * using the first CPU port in the switch tree if the port does not have a CPU
410 * port local to this switch.
411 */
412static int dsa_tree_setup_cpu_ports(struct dsa_switch_tree *dst)
413{
414 struct dsa_port *cpu_dp, *dp;
415
416 list_for_each_entry(cpu_dp, &dst->ports, list) {
417 if (!dsa_port_is_cpu(cpu_dp))
418 continue;
419
Vladimir Oltean65c563a2021-10-20 20:49:51 +0300420 /* Prefer a local CPU port */
421 dsa_switch_for_each_port(dp, cpu_dp->ds) {
Vladimir Oltean2c0b0322021-08-04 16:54:30 +0300422 /* Prefer the first local CPU port found */
423 if (dp->cpu_dp)
424 continue;
425
426 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
427 dp->cpu_dp = cpu_dp;
428 }
429 }
430
431 return dsa_tree_setup_default_cpu(dst);
432}
433
Vladimir Oltean0e8eb9a2021-08-04 16:54:29 +0300434static void dsa_tree_teardown_cpu_ports(struct dsa_switch_tree *dst)
Vivien Didelotf0704642017-11-06 16:11:44 -0500435{
Vivien Didelotda4561c2019-10-21 16:51:24 -0400436 struct dsa_port *dp;
437
438 list_for_each_entry(dp, &dst->ports, list)
439 if (dsa_port_is_user(dp) || dsa_port_is_dsa(dp))
440 dp->cpu_dp = NULL;
Vivien Didelotf0704642017-11-06 16:11:44 -0500441}
442
/* Bring up a single port according to its type. Idempotent: returns 0 if the
 * port was already set up. On any failure, every step that succeeded so far
 * (driver port_setup, link registration, port enable) is unwound before
 * returning the error.
 */
static int dsa_port_setup(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	bool dsa_port_link_registered = false;
	struct dsa_switch *ds = dp->ds;
	bool dsa_port_enabled = false;
	int err = 0;

	if (dp->setup)
		return 0;

	mutex_init(&dp->addr_lists_lock);
	INIT_LIST_HEAD(&dp->fdbs);
	INIT_LIST_HEAD(&dp->mdbs);

	/* Give the driver a chance to set up the port first (e.g. regions) */
	if (ds->ops->port_setup) {
		err = ds->ops->port_setup(ds, dp->index);
		if (err)
			return err;
	}

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		dsa_port_disable(dp);
		break;
	case DSA_PORT_TYPE_CPU:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_DSA:
		err = dsa_port_link_register_of(dp);
		if (err)
			break;
		dsa_port_link_registered = true;

		err = dsa_port_enable(dp, NULL);
		if (err)
			break;
		dsa_port_enabled = true;

		break;
	case DSA_PORT_TYPE_USER:
		of_get_mac_address(dp->dn, dp->mac);
		err = dsa_slave_create(dp);
		if (err)
			break;

		devlink_port_type_eth_set(dlp, dp->slave);
		break;
	}

	/* Unwind in reverse order of the steps that succeeded */
	if (err && dsa_port_enabled)
		dsa_port_disable(dp);
	if (err && dsa_port_link_registered)
		dsa_port_link_unregister_of(dp);
	if (err) {
		if (ds->ops->port_teardown)
			ds->ops->port_teardown(ds, dp->index);
		return err;
	}

	dp->setup = true;

	return 0;
}
516
Andrew Lunn31224332020-10-04 18:12:53 +0200517static int dsa_port_devlink_setup(struct dsa_port *dp)
Andrew Lunn83c0afa2016-06-04 21:17:07 +0200518{
Vivien Didelot955222c2019-08-19 16:00:48 -0400519 struct devlink_port *dlp = &dp->devlink_port;
Andrew Lunn31224332020-10-04 18:12:53 +0200520 struct dsa_switch_tree *dst = dp->ds->dst;
521 struct devlink_port_attrs attrs = {};
522 struct devlink *dl = dp->ds->devlink;
523 const unsigned char *id;
524 unsigned char len;
525 int err;
Vivien Didelot1d277322017-11-06 16:11:48 -0500526
Andrew Lunn31224332020-10-04 18:12:53 +0200527 id = (const unsigned char *)&dst->index;
528 len = sizeof(dst->index);
529
530 attrs.phys.port_number = dp->index;
531 memcpy(attrs.switch_id.id, id, len);
532 attrs.switch_id.id_len = len;
533 memset(dlp, 0, sizeof(*dlp));
534
535 switch (dp->type) {
536 case DSA_PORT_TYPE_UNUSED:
537 attrs.flavour = DEVLINK_PORT_FLAVOUR_UNUSED;
538 break;
539 case DSA_PORT_TYPE_CPU:
540 attrs.flavour = DEVLINK_PORT_FLAVOUR_CPU;
541 break;
542 case DSA_PORT_TYPE_DSA:
543 attrs.flavour = DEVLINK_PORT_FLAVOUR_DSA;
544 break;
545 case DSA_PORT_TYPE_USER:
546 attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
547 break;
548 }
549
550 devlink_port_attrs_set(dlp, &attrs);
551 err = devlink_port_register(dl, dlp, dp->index);
552
553 if (!err)
554 dp->devlink_port_setup = true;
555
556 return err;
557}
558
/* Tear down a single port: reverse of dsa_port_setup(). Idempotent: does
 * nothing if the port was never set up. Also flushes the per-port FDB/MDB
 * refcount lists.
 */
static void dsa_port_teardown(struct dsa_port *dp)
{
	struct devlink_port *dlp = &dp->devlink_port;
	struct dsa_switch *ds = dp->ds;
	struct dsa_mac_addr *a, *tmp;
	struct net_device *slave;

	if (!dp->setup)
		return;

	if (ds->ops->port_teardown)
		ds->ops->port_teardown(ds, dp->index);

	devlink_port_type_clear(dlp);

	switch (dp->type) {
	case DSA_PORT_TYPE_UNUSED:
		break;
	case DSA_PORT_TYPE_CPU:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_DSA:
		dsa_port_disable(dp);
		dsa_port_link_unregister_of(dp);
		break;
	case DSA_PORT_TYPE_USER:
		/* Clear dp->slave before destroying it, so that concurrent
		 * lookups no longer see the netdev while it goes away.
		 */
		slave = dp->slave;

		if (slave) {
			dp->slave = NULL;
			dsa_slave_destroy(slave);
		}
		break;
	}

	/* Free any leftover FDB entries tracked on this port */
	list_for_each_entry_safe(a, tmp, &dp->fdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	/* Free any leftover MDB entries tracked on this port */
	list_for_each_entry_safe(a, tmp, &dp->mdbs, list) {
		list_del(&a->list);
		kfree(a);
	}

	dp->setup = false;
}
607
Andrew Lunn31224332020-10-04 18:12:53 +0200608static void dsa_port_devlink_teardown(struct dsa_port *dp)
609{
610 struct devlink_port *dlp = &dp->devlink_port;
611
612 if (dp->devlink_port_setup)
613 devlink_port_unregister(dlp);
614 dp->devlink_port_setup = false;
615}
616
Vladimir Olteanfd292c12021-09-17 17:29:16 +0300617/* Destroy the current devlink port, and create a new one which has the UNUSED
618 * flavour. At this point, any call to ds->ops->port_setup has been already
619 * balanced out by a call to ds->ops->port_teardown, so we know that any
620 * devlink port regions the driver had are now unregistered. We then call its
621 * ds->ops->port_setup again, in order for the driver to re-create them on the
622 * new devlink port.
623 */
624static int dsa_port_reinit_as_unused(struct dsa_port *dp)
625{
626 struct dsa_switch *ds = dp->ds;
627 int err;
628
629 dsa_port_devlink_teardown(dp);
630 dp->type = DSA_PORT_TYPE_UNUSED;
631 err = dsa_port_devlink_setup(dp);
632 if (err)
633 return err;
634
635 if (ds->ops->port_setup) {
636 /* On error, leave the devlink port registered,
637 * dsa_switch_teardown will clean it up later.
638 */
639 err = ds->ops->port_setup(ds, dp->index);
640 if (err)
641 return err;
642 }
643
644 return 0;
645}
646
Andrew Lunn0f06b852020-09-18 21:11:08 +0200647static int dsa_devlink_info_get(struct devlink *dl,
648 struct devlink_info_req *req,
649 struct netlink_ext_ack *extack)
650{
651 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
652
653 if (ds->ops->devlink_info_get)
654 return ds->ops->devlink_info_get(ds, req, extack);
655
656 return -EOPNOTSUPP;
657}
658
Vladimir Oltean2a6ef762021-01-15 04:11:13 +0200659static int dsa_devlink_sb_pool_get(struct devlink *dl,
660 unsigned int sb_index, u16 pool_index,
661 struct devlink_sb_pool_info *pool_info)
662{
663 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
664
665 if (!ds->ops->devlink_sb_pool_get)
666 return -EOPNOTSUPP;
667
668 return ds->ops->devlink_sb_pool_get(ds, sb_index, pool_index,
669 pool_info);
670}
671
672static int dsa_devlink_sb_pool_set(struct devlink *dl, unsigned int sb_index,
673 u16 pool_index, u32 size,
674 enum devlink_sb_threshold_type threshold_type,
675 struct netlink_ext_ack *extack)
676{
677 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
678
679 if (!ds->ops->devlink_sb_pool_set)
680 return -EOPNOTSUPP;
681
682 return ds->ops->devlink_sb_pool_set(ds, sb_index, pool_index, size,
683 threshold_type, extack);
684}
685
686static int dsa_devlink_sb_port_pool_get(struct devlink_port *dlp,
687 unsigned int sb_index, u16 pool_index,
688 u32 *p_threshold)
689{
690 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
691 int port = dsa_devlink_port_to_port(dlp);
692
693 if (!ds->ops->devlink_sb_port_pool_get)
694 return -EOPNOTSUPP;
695
696 return ds->ops->devlink_sb_port_pool_get(ds, port, sb_index,
697 pool_index, p_threshold);
698}
699
700static int dsa_devlink_sb_port_pool_set(struct devlink_port *dlp,
701 unsigned int sb_index, u16 pool_index,
702 u32 threshold,
703 struct netlink_ext_ack *extack)
704{
705 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
706 int port = dsa_devlink_port_to_port(dlp);
707
708 if (!ds->ops->devlink_sb_port_pool_set)
709 return -EOPNOTSUPP;
710
711 return ds->ops->devlink_sb_port_pool_set(ds, port, sb_index,
712 pool_index, threshold, extack);
713}
714
715static int
716dsa_devlink_sb_tc_pool_bind_get(struct devlink_port *dlp,
717 unsigned int sb_index, u16 tc_index,
718 enum devlink_sb_pool_type pool_type,
719 u16 *p_pool_index, u32 *p_threshold)
720{
721 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
722 int port = dsa_devlink_port_to_port(dlp);
723
724 if (!ds->ops->devlink_sb_tc_pool_bind_get)
725 return -EOPNOTSUPP;
726
727 return ds->ops->devlink_sb_tc_pool_bind_get(ds, port, sb_index,
728 tc_index, pool_type,
729 p_pool_index, p_threshold);
730}
731
732static int
733dsa_devlink_sb_tc_pool_bind_set(struct devlink_port *dlp,
734 unsigned int sb_index, u16 tc_index,
735 enum devlink_sb_pool_type pool_type,
736 u16 pool_index, u32 threshold,
737 struct netlink_ext_ack *extack)
738{
739 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
740 int port = dsa_devlink_port_to_port(dlp);
741
742 if (!ds->ops->devlink_sb_tc_pool_bind_set)
743 return -EOPNOTSUPP;
744
745 return ds->ops->devlink_sb_tc_pool_bind_set(ds, port, sb_index,
746 tc_index, pool_type,
747 pool_index, threshold,
748 extack);
749}
750
751static int dsa_devlink_sb_occ_snapshot(struct devlink *dl,
752 unsigned int sb_index)
753{
754 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
755
756 if (!ds->ops->devlink_sb_occ_snapshot)
757 return -EOPNOTSUPP;
758
759 return ds->ops->devlink_sb_occ_snapshot(ds, sb_index);
760}
761
762static int dsa_devlink_sb_occ_max_clear(struct devlink *dl,
763 unsigned int sb_index)
764{
765 struct dsa_switch *ds = dsa_devlink_to_ds(dl);
766
767 if (!ds->ops->devlink_sb_occ_max_clear)
768 return -EOPNOTSUPP;
769
770 return ds->ops->devlink_sb_occ_max_clear(ds, sb_index);
771}
772
773static int dsa_devlink_sb_occ_port_pool_get(struct devlink_port *dlp,
774 unsigned int sb_index,
775 u16 pool_index, u32 *p_cur,
776 u32 *p_max)
777{
778 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
779 int port = dsa_devlink_port_to_port(dlp);
780
781 if (!ds->ops->devlink_sb_occ_port_pool_get)
782 return -EOPNOTSUPP;
783
784 return ds->ops->devlink_sb_occ_port_pool_get(ds, port, sb_index,
785 pool_index, p_cur, p_max);
786}
787
788static int
789dsa_devlink_sb_occ_tc_port_bind_get(struct devlink_port *dlp,
790 unsigned int sb_index, u16 tc_index,
791 enum devlink_sb_pool_type pool_type,
792 u32 *p_cur, u32 *p_max)
793{
794 struct dsa_switch *ds = dsa_devlink_port_to_ds(dlp);
795 int port = dsa_devlink_port_to_port(dlp);
796
797 if (!ds->ops->devlink_sb_occ_tc_port_bind_get)
798 return -EOPNOTSUPP;
799
800 return ds->ops->devlink_sb_occ_tc_port_bind_get(ds, port,
801 sb_index, tc_index,
802 pool_type, p_cur,
803 p_max);
804}
805
/* devlink ops shared by all DSA switches; each callback dispatches to the
 * corresponding dsa_switch_ops hook, returning -EOPNOTSUPP when the driver
 * does not implement it.
 */
static const struct devlink_ops dsa_devlink_ops = {
	.info_get			= dsa_devlink_info_get,
	.sb_pool_get			= dsa_devlink_sb_pool_get,
	.sb_pool_set			= dsa_devlink_sb_pool_set,
	.sb_port_pool_get		= dsa_devlink_sb_port_pool_get,
	.sb_port_pool_set		= dsa_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= dsa_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= dsa_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= dsa_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= dsa_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= dsa_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= dsa_devlink_sb_occ_tc_port_bind_get,
};
819
/* Make switch @ds use the tagging protocol selected for its tree: change the
 * protocol on each CPU port if it differs from the default, then run the
 * tagger's and the driver's connect hooks. On a failed driver connect, the
 * tagger connection is unwound.
 */
static int dsa_switch_setup_tag_protocol(struct dsa_switch *ds)
{
	const struct dsa_device_ops *tag_ops = ds->dst->tag_ops;
	struct dsa_switch_tree *dst = ds->dst;
	struct dsa_port *cpu_dp;
	int err;

	/* Hardware already speaks the default protocol; nothing to change */
	if (tag_ops->proto == dst->default_proto)
		goto connect;

	dsa_switch_for_each_cpu_port(cpu_dp, ds) {
		/* change_tag_protocol expects rtnl to be held */
		rtnl_lock();
		err = ds->ops->change_tag_protocol(ds, cpu_dp->index,
						   tag_ops->proto);
		rtnl_unlock();
		if (err) {
			dev_err(ds->dev, "Unable to use tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			return err;
		}
	}

connect:
	/* Optional tagger-side connection (per-switch tagger state) */
	if (tag_ops->connect) {
		err = tag_ops->connect(ds);
		if (err)
			return err;
	}

	/* Optional driver-side connection to the tagging protocol */
	if (ds->ops->connect_tag_protocol) {
		err = ds->ops->connect_tag_protocol(ds, tag_ops->proto);
		if (err) {
			dev_err(ds->dev,
				"Unable to connect to tag protocol \"%s\": %pe\n",
				tag_ops->name, ERR_PTR(err));
			goto disconnect;
		}
	}

	return 0;

disconnect:
	/* Balance the earlier tag_ops->connect() */
	if (tag_ops->disconnect)
		tag_ops->disconnect(ds);

	return err;
}
867
/* One-time setup of a single switch: devlink registration, devlink ports,
 * the switch notifier, the driver's ->setup(), tag protocol sync, and the
 * optional slave MDIO bus. Idempotent via ds->setup. On failure, everything
 * done so far is unwound in reverse order through the goto ladder.
 */
static int dsa_switch_setup(struct dsa_switch *ds)
{
	struct dsa_devlink_priv *dl_priv;
	struct dsa_port *dp;
	int err;

	/* Already set up (a tree visits a switch once per port) */
	if (ds->setup)
		return 0;

	/* Initialize ds->phys_mii_mask before registering the slave MDIO bus
	 * driver and before ops->setup() has run, since the switch drivers and
	 * the slave MDIO bus driver rely on these values for probing PHY
	 * devices or not
	 */
	ds->phys_mii_mask |= dsa_user_ports(ds);

	/* Add the switch to devlink before calling setup, so that setup can
	 * add dpipe tables
	 */
	ds->devlink =
		devlink_alloc(&dsa_devlink_ops, sizeof(*dl_priv), ds->dev);
	if (!ds->devlink)
		return -ENOMEM;
	dl_priv = devlink_priv(ds->devlink);
	dl_priv->ds = ds;

	/* Setup devlink port instances now, so that the switch
	 * setup() can register regions etc, against the ports
	 */
	dsa_switch_for_each_port(dp, ds) {
		err = dsa_port_devlink_setup(dp);
		if (err)
			goto unregister_devlink_ports;
	}

	err = dsa_switch_register_notifier(ds);
	if (err)
		goto unregister_devlink_ports;

	ds->configure_vlan_while_not_filtering = true;

	err = ds->ops->setup(ds);
	if (err < 0)
		goto unregister_notifier;

	/* Must run after ->setup(): the driver must be ready to accept
	 * ->change_tag_protocol() calls
	 */
	err = dsa_switch_setup_tag_protocol(ds);
	if (err)
		goto teardown;

	/* Register an internal MDIO bus only for drivers that expose
	 * PHY accessors but did not provide their own bus
	 */
	if (!ds->slave_mii_bus && ds->ops->phy_read) {
		ds->slave_mii_bus = mdiobus_alloc();
		if (!ds->slave_mii_bus) {
			err = -ENOMEM;
			goto teardown;
		}

		dsa_slave_mii_bus_init(ds);

		err = mdiobus_register(ds->slave_mii_bus);
		if (err < 0)
			goto free_slave_mii_bus;
	}

	ds->setup = true;
	/* Publish to devlink only once fully initialized */
	devlink_register(ds->devlink);
	return 0;

free_slave_mii_bus:
	if (ds->slave_mii_bus && ds->ops->phy_read)
		mdiobus_free(ds->slave_mii_bus);
teardown:
	if (ds->ops->teardown)
		ds->ops->teardown(ds);
unregister_notifier:
	dsa_switch_unregister_notifier(ds);
unregister_devlink_ports:
	dsa_switch_for_each_port(dp, ds)
		dsa_port_devlink_teardown(dp);
	devlink_free(ds->devlink);
	ds->devlink = NULL;
	return err;
}
950
/* Undo dsa_switch_setup() in reverse order. Idempotent via ds->setup, so a
 * switch reachable through several ports is only torn down once.
 */
static void dsa_switch_teardown(struct dsa_switch *ds)
{
	struct dsa_port *dp;

	if (!ds->setup)
		return;

	/* Unpublish from devlink first, mirroring the late
	 * devlink_register() in dsa_switch_setup()
	 */
	if (ds->devlink)
		devlink_unregister(ds->devlink);

	/* Only dispose of the MDIO bus if DSA allocated it itself */
	if (ds->slave_mii_bus && ds->ops->phy_read) {
		mdiobus_unregister(ds->slave_mii_bus);
		mdiobus_free(ds->slave_mii_bus);
		ds->slave_mii_bus = NULL;
	}

	if (ds->ops->teardown)
		ds->ops->teardown(ds);

	dsa_switch_unregister_notifier(ds);

	if (ds->devlink) {
		dsa_switch_for_each_port(dp, ds)
			dsa_port_devlink_teardown(dp);
		devlink_free(ds->devlink);
		ds->devlink = NULL;
	}

	ds->setup = false;
}
981
/* First tear down the non-shared, then the shared ports. This ensures that
 * all work items scheduled by our switchdev handlers for user ports have
 * completed before we destroy the refcounting kept on the shared ports.
 */
static void dsa_tree_teardown_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;

	/* Non-shared ports: user and unused */
	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp))
			dsa_port_teardown(dp);

	/* Drain pending switchdev work before touching shared ports */
	dsa_flush_workqueue();

	/* Shared ports: DSA links and CPU ports */
	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp))
			dsa_port_teardown(dp);
}
1000
1001static void dsa_tree_teardown_switches(struct dsa_switch_tree *dst)
1002{
1003 struct dsa_port *dp;
1004
1005 list_for_each_entry(dp, &dst->ports, list)
1006 dsa_switch_teardown(dp->ds);
1007}
1008
/* Bring shared ports up first, then non-shared ports */
static int dsa_tree_setup_ports(struct dsa_switch_tree *dst)
{
	struct dsa_port *dp;
	int err = 0;

	/* Shared ports: DSA links and CPU ports */
	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_dsa(dp) || dsa_port_is_cpu(dp)) {
			err = dsa_port_setup(dp);
			if (err)
				goto teardown;
		}
	}

	/* Non-shared ports: user and unused ports. A user port that fails
	 * to set up is demoted to unused rather than failing the whole
	 * tree; only a failed reinit is fatal.
	 */
	list_for_each_entry(dp, &dst->ports, list) {
		if (dsa_port_is_user(dp) || dsa_port_is_unused(dp)) {
			err = dsa_port_setup(dp);
			if (err) {
				err = dsa_port_reinit_as_unused(dp);
				if (err)
					goto teardown;
			}
		}
	}

	return 0;

teardown:
	dsa_tree_teardown_ports(dst);

	return err;
}
1041
1042static int dsa_tree_setup_switches(struct dsa_switch_tree *dst)
1043{
1044 struct dsa_port *dp;
1045 int err = 0;
1046
1047 list_for_each_entry(dp, &dst->ports, list) {
1048 err = dsa_switch_setup(dp->ds);
1049 if (err) {
1050 dsa_tree_teardown_switches(dst);
1051 break;
1052 }
1053 }
Ioana Ciorneie70c7aa2019-05-30 09:09:07 +03001054
1055 return err;
Vivien Didelot1f08f9e2017-11-06 16:11:47 -05001056}
1057
Vivien Didelot17a22fc2017-11-06 16:11:45 -05001058static int dsa_tree_setup_master(struct dsa_switch_tree *dst)
1059{
Vivien Didelot0cfec582019-10-21 16:51:22 -04001060 struct dsa_port *dp;
1061 int err;
Vivien Didelot17a22fc2017-11-06 16:11:45 -05001062
Vladimir Olteanc146f9b2022-01-06 01:11:15 +02001063 rtnl_lock();
1064
Vivien Didelot0cfec582019-10-21 16:51:22 -04001065 list_for_each_entry(dp, &dst->ports, list) {
1066 if (dsa_port_is_cpu(dp)) {
1067 err = dsa_master_setup(dp->master, dp);
1068 if (err)
1069 return err;
1070 }
1071 }
1072
Vladimir Olteanc146f9b2022-01-06 01:11:15 +02001073 rtnl_unlock();
1074
Vivien Didelot0cfec582019-10-21 16:51:22 -04001075 return 0;
Vivien Didelot17a22fc2017-11-06 16:11:45 -05001076}
1077
1078static void dsa_tree_teardown_master(struct dsa_switch_tree *dst)
1079{
Vivien Didelot0cfec582019-10-21 16:51:22 -04001080 struct dsa_port *dp;
Vivien Didelot17a22fc2017-11-06 16:11:45 -05001081
Vladimir Olteanc146f9b2022-01-06 01:11:15 +02001082 rtnl_lock();
1083
Vivien Didelot0cfec582019-10-21 16:51:22 -04001084 list_for_each_entry(dp, &dst->ports, list)
1085 if (dsa_port_is_cpu(dp))
1086 dsa_master_teardown(dp->master);
Vladimir Olteanc146f9b2022-01-06 01:11:15 +02001087
1088 rtnl_unlock();
Vivien Didelot17a22fc2017-11-06 16:11:45 -05001089}
1090
Tobias Waldekranz058102a2021-01-13 09:42:53 +01001091static int dsa_tree_setup_lags(struct dsa_switch_tree *dst)
1092{
1093 unsigned int len = 0;
1094 struct dsa_port *dp;
1095
1096 list_for_each_entry(dp, &dst->ports, list) {
1097 if (dp->ds->num_lag_ids > len)
1098 len = dp->ds->num_lag_ids;
1099 }
1100
1101 if (!len)
1102 return 0;
1103
1104 dst->lags = kcalloc(len, sizeof(*dst->lags), GFP_KERNEL);
1105 if (!dst->lags)
1106 return -ENOMEM;
1107
1108 dst->lags_len = len;
1109 return 0;
1110}
1111
/* Free the tree-wide LAG array allocated by dsa_tree_setup_lags() */
static void dsa_tree_teardown_lags(struct dsa_switch_tree *dst)
{
	kfree(dst->lags);
}
1116
/* Bring up a complete switch tree once its routing table is complete:
 * CPU ports, switches, masters, ports, then LAG bookkeeping. Each failure
 * unwinds everything already set up, in reverse order.
 */
static int dsa_tree_setup(struct dsa_switch_tree *dst)
{
	bool complete;
	int err;

	if (dst->setup) {
		pr_err("DSA: tree %d already setup! Disjoint trees?\n",
		       dst->index);
		return -EEXIST;
	}

	/* Defer full setup until every member switch has probed and the
	 * inter-switch routing table is complete
	 */
	complete = dsa_tree_setup_routing_table(dst);
	if (!complete)
		return 0;

	err = dsa_tree_setup_cpu_ports(dst);
	if (err)
		return err;

	err = dsa_tree_setup_switches(dst);
	if (err)
		goto teardown_cpu_ports;

	err = dsa_tree_setup_master(dst);
	if (err)
		goto teardown_switches;

	err = dsa_tree_setup_ports(dst);
	if (err)
		goto teardown_master;

	err = dsa_tree_setup_lags(dst);
	if (err)
		goto teardown_ports;

	dst->setup = true;

	pr_info("DSA: tree %d setup\n", dst->index);

	return 0;

teardown_ports:
	dsa_tree_teardown_ports(dst);
teardown_master:
	dsa_tree_teardown_master(dst);
teardown_switches:
	dsa_tree_teardown_switches(dst);
teardown_cpu_ports:
	dsa_tree_teardown_cpu_ports(dst);

	return err;
}
1169
/* Undo dsa_tree_setup() in reverse order and free the routing table links.
 * Idempotent via dst->setup.
 */
static void dsa_tree_teardown(struct dsa_switch_tree *dst)
{
	struct dsa_link *dl, *next;

	if (!dst->setup)
		return;

	dsa_tree_teardown_lags(dst);

	dsa_tree_teardown_ports(dst);

	dsa_tree_teardown_master(dst);

	dsa_tree_teardown_switches(dst);

	dsa_tree_teardown_cpu_ports(dst);

	/* Release the inter-switch routing table entries */
	list_for_each_entry_safe(dl, next, &dst->rtable, list) {
		list_del(&dl->list);
		kfree(dl);
	}

	pr_info("DSA: tree %d torn down\n", dst->index);

	dst->setup = false;
}
1196
/* Point the tree at a new tagger and run the connect/disconnect handshake:
 * connect all switches to the new tagger, then disconnect them from the
 * old one. -EOPNOTSUPP from the connect notifier is tolerated (drivers
 * without per-tagger state). On any other error, the new tagger is
 * disconnected again and dst->tag_ops is restored.
 */
static int dsa_tree_bind_tag_proto(struct dsa_switch_tree *dst,
				   const struct dsa_device_ops *tag_ops)
{
	const struct dsa_device_ops *old_tag_ops = dst->tag_ops;
	struct dsa_notifier_tag_proto_info info;
	int err;

	dst->tag_ops = tag_ops;

	/* Notify the switches from this tree about the connection
	 * to the new tagger
	 */
	info.tag_ops = tag_ops;
	err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_CONNECT, &info);
	if (err && err != -EOPNOTSUPP)
		goto out_disconnect;

	/* Notify the old tagger about the disconnection from this tree */
	info.tag_ops = old_tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);

	return 0;

out_disconnect:
	info.tag_ops = tag_ops;
	dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO_DISCONNECT, &info);
	dst->tag_ops = old_tag_ops;

	return err;
}
1227
Vladimir Oltean53da0eb2021-01-29 03:00:06 +02001228/* Since the dsa/tagging sysfs device attribute is per master, the assumption
1229 * is that all DSA switches within a tree share the same tagger, otherwise
1230 * they would have formed disjoint trees (different "dsa,member" values).
1231 */
1232int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
1233 struct net_device *master,
1234 const struct dsa_device_ops *tag_ops,
1235 const struct dsa_device_ops *old_tag_ops)
1236{
1237 struct dsa_notifier_tag_proto_info info;
1238 struct dsa_port *dp;
1239 int err = -EBUSY;
1240
1241 if (!rtnl_trylock())
1242 return restart_syscall();
1243
1244 /* At the moment we don't allow changing the tag protocol under
1245 * traffic. The rtnl_mutex also happens to serialize concurrent
1246 * attempts to change the tagging protocol. If we ever lift the IFF_UP
1247 * restriction, there needs to be another mutex which serializes this.
1248 */
1249 if (master->flags & IFF_UP)
1250 goto out_unlock;
1251
1252 list_for_each_entry(dp, &dst->ports, list) {
Vladimir Olteand0004a02021-10-20 20:49:50 +03001253 if (!dsa_port_is_user(dp))
Vladimir Oltean53da0eb2021-01-29 03:00:06 +02001254 continue;
1255
1256 if (dp->slave->flags & IFF_UP)
1257 goto out_unlock;
1258 }
1259
Vladimir Olteandc452a42021-12-10 01:34:37 +02001260 /* Notify the tag protocol change */
Vladimir Oltean53da0eb2021-01-29 03:00:06 +02001261 info.tag_ops = tag_ops;
1262 err = dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
1263 if (err)
Vladimir Olteandc452a42021-12-10 01:34:37 +02001264 return err;
Vladimir Oltean53da0eb2021-01-29 03:00:06 +02001265
Vladimir Olteandc452a42021-12-10 01:34:37 +02001266 err = dsa_tree_bind_tag_proto(dst, tag_ops);
1267 if (err)
1268 goto out_unwind_tagger;
Vladimir Oltean53da0eb2021-01-29 03:00:06 +02001269
1270 rtnl_unlock();
1271
1272 return 0;
1273
1274out_unwind_tagger:
1275 info.tag_ops = old_tag_ops;
1276 dsa_tree_notify(dst, DSA_NOTIFIER_TAG_PROTO, &info);
1277out_unlock:
1278 rtnl_unlock();
1279 return err;
1280}
1281
Vivien Didelotab8ccae2019-10-21 16:51:16 -04001282static struct dsa_port *dsa_port_touch(struct dsa_switch *ds, int index)
1283{
1284 struct dsa_switch_tree *dst = ds->dst;
1285 struct dsa_port *dp;
1286
Vladimir Oltean65c563a2021-10-20 20:49:51 +03001287 dsa_switch_for_each_port(dp, ds)
1288 if (dp->index == index)
Vivien Didelot05f294a2019-10-21 16:51:29 -04001289 return dp;
1290
1291 dp = kzalloc(sizeof(*dp), GFP_KERNEL);
1292 if (!dp)
1293 return NULL;
Vivien Didelotab8ccae2019-10-21 16:51:16 -04001294
1295 dp->ds = ds;
1296 dp->index = index;
1297
1298 INIT_LIST_HEAD(&dp->list);
1299 list_add_tail(&dp->list, &dst->ports);
1300
1301 return dp;
1302}
1303
Vivien Didelot06e24d02017-11-03 19:05:29 -04001304static int dsa_port_parse_user(struct dsa_port *dp, const char *name)
1305{
1306 if (!name)
1307 name = "eth%d";
1308
1309 dp->type = DSA_PORT_TYPE_USER;
1310 dp->name = name;
1311
1312 return 0;
1313}
1314
/* Mark @dp as a DSA (switch-to-switch) link port */
static int dsa_port_parse_dsa(struct dsa_port *dp)
{
	dp->type = DSA_PORT_TYPE_DSA;

	return 0;
}
1321
/* Ask the switch driver which tagging protocol it prefers for @dp's CPU
 * port. When @master is itself a DSA user port (stacked/disjoint trees),
 * the upstream switch's protocol is passed down as a hint so the driver
 * can judge compatibility.
 */
static enum dsa_tag_protocol dsa_get_tag_protocol(struct dsa_port *dp,
						  struct net_device *master)
{
	enum dsa_tag_protocol tag_protocol = DSA_TAG_PROTO_NONE;
	struct dsa_switch *mds, *ds = dp->ds;
	unsigned int mdp_upstream;
	struct dsa_port *mdp;

	/* It is possible to stack DSA switches onto one another when that
	 * happens the switch driver may want to know if its tagging protocol
	 * is going to work in such a configuration.
	 */
	if (dsa_slave_dev_check(master)) {
		mdp = dsa_slave_to_port(master);
		mds = mdp->ds;
		mdp_upstream = dsa_upstream_port(mds, mdp->index);
		tag_protocol = mds->ops->get_tag_protocol(mds, mdp_upstream,
							  DSA_TAG_PROTO_NONE);
	}

	/* If the master device is not itself a DSA slave in a disjoint DSA
	 * tree, then return immediately.
	 */
	return ds->ops->get_tag_protocol(ds, dp->index, tag_protocol);
}
1347
/* Configure @dp as a CPU port attached to @master. Resolves the tagging
 * protocol for the whole tree: the driver's preferred protocol, optionally
 * overridden by @user_protocol (from the "dsa-tag-protocol" DT property).
 * Enforces a single tagger per tree, with the tagger module refcounted
 * once per tree rather than once per CPU port.
 */
static int dsa_port_parse_cpu(struct dsa_port *dp, struct net_device *master,
			      const char *user_protocol)
{
	struct dsa_switch *ds = dp->ds;
	struct dsa_switch_tree *dst = ds->dst;
	const struct dsa_device_ops *tag_ops;
	enum dsa_tag_protocol default_proto;

	/* Find out which protocol the switch would prefer. */
	default_proto = dsa_get_tag_protocol(dp, master);
	if (dst->default_proto) {
		/* Another CPU port already recorded a preference; all
		 * switches in the tree must agree
		 */
		if (dst->default_proto != default_proto) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");
			return -EINVAL;
		}
	} else {
		dst->default_proto = default_proto;
	}

	/* See if the user wants to override that preference. */
	if (user_protocol) {
		/* Overriding requires driver support for runtime changes */
		if (!ds->ops->change_tag_protocol) {
			dev_err(ds->dev, "Tag protocol cannot be modified\n");
			return -EINVAL;
		}

		tag_ops = dsa_find_tagger_by_name(user_protocol);
	} else {
		tag_ops = dsa_tag_driver_get(default_proto);
	}

	if (IS_ERR(tag_ops)) {
		/* Tagger module may simply not be loaded yet */
		if (PTR_ERR(tag_ops) == -ENOPROTOOPT)
			return -EPROBE_DEFER;

		dev_warn(ds->dev, "No tagger for this switch\n");
		return PTR_ERR(tag_ops);
	}

	if (dst->tag_ops) {
		if (dst->tag_ops != tag_ops) {
			dev_err(ds->dev,
				"A DSA switch tree can have only one tagging protocol\n");

			dsa_tag_driver_put(tag_ops);
			return -EINVAL;
		}

		/* In the case of multiple CPU ports per switch, the tagging
		 * protocol is still reference-counted only per switch tree.
		 */
		dsa_tag_driver_put(tag_ops);
	} else {
		dst->tag_ops = tag_ops;
	}

	dp->master = master;
	dp->type = DSA_PORT_TYPE_CPU;
	dsa_port_set_tag_protocol(dp, dst->tag_ops);
	dp->dst = dst;

	/* At this point, the tree may be configured to use a different
	 * tagger than the one chosen by the switch driver during
	 * .setup, in the case when a user selects a custom protocol
	 * through the DT.
	 *
	 * This is resolved by syncing the driver with the tree in
	 * dsa_switch_setup_tag_protocol once .setup has run and the
	 * driver is ready to accept calls to .change_tag_protocol. If
	 * the driver does not support the custom protocol at that
	 * point, the tree is wholly rejected, thereby ensuring that the
	 * tree and driver are always in agreement on the protocol to
	 * use.
	 */
	return 0;
}
1425
Vivien Didelotfd223e22017-10-27 15:55:14 -04001426static int dsa_port_parse_of(struct dsa_port *dp, struct device_node *dn)
1427{
Vivien Didelot6d4e5c52017-10-27 15:55:15 -04001428 struct device_node *ethernet = of_parse_phandle(dn, "ethernet", 0);
Vivien Didelot1838fa82017-10-27 15:55:18 -04001429 const char *name = of_get_property(dn, "label", NULL);
Vivien Didelot54df6fa2017-11-03 19:05:28 -04001430 bool link = of_property_read_bool(dn, "link");
Vivien Didelot6d4e5c52017-10-27 15:55:15 -04001431
Vivien Didelot06e24d02017-11-03 19:05:29 -04001432 dp->dn = dn;
1433
Vivien Didelot6d4e5c52017-10-27 15:55:15 -04001434 if (ethernet) {
Vivien Didelotcbabb0a2017-10-27 15:55:17 -04001435 struct net_device *master;
Tobias Waldekranzdeff7102021-04-20 20:53:10 +02001436 const char *user_protocol;
Vivien Didelotcbabb0a2017-10-27 15:55:17 -04001437
1438 master = of_find_net_device_by_node(ethernet);
1439 if (!master)
1440 return -EPROBE_DEFER;
1441
Tobias Waldekranzdeff7102021-04-20 20:53:10 +02001442 user_protocol = of_get_property(dn, "dsa-tag-protocol", NULL);
1443 return dsa_port_parse_cpu(dp, master, user_protocol);
Vivien Didelot6d4e5c52017-10-27 15:55:15 -04001444 }
1445
Vivien Didelot06e24d02017-11-03 19:05:29 -04001446 if (link)
1447 return dsa_port_parse_dsa(dp);
Vivien Didelotfd223e22017-10-27 15:55:14 -04001448
Vivien Didelot06e24d02017-11-03 19:05:29 -04001449 return dsa_port_parse_user(dp, name);
Vivien Didelotfd223e22017-10-27 15:55:14 -04001450}
1451
/* Parse all port child nodes under the switch's "ports" (or the alternate
 * "ethernet-ports") container node. Each child must have a "reg" index
 * below ds->num_ports. On early exit from the iterator, the current child
 * reference must be dropped explicitly before releasing the container.
 */
static int dsa_switch_parse_ports_of(struct dsa_switch *ds,
				     struct device_node *dn)
{
	struct device_node *ports, *port;
	struct dsa_port *dp;
	int err = 0;
	u32 reg;

	ports = of_get_child_by_name(dn, "ports");
	if (!ports) {
		/* The second possibility is "ethernet-ports" */
		ports = of_get_child_by_name(dn, "ethernet-ports");
		if (!ports) {
			dev_err(ds->dev, "no ports child node found\n");
			return -EINVAL;
		}
	}

	for_each_available_child_of_node(ports, port) {
		err = of_property_read_u32(port, "reg", &reg);
		if (err) {
			/* Iterator holds a ref on @port; drop it on break */
			of_node_put(port);
			goto out_put_node;
		}

		if (reg >= ds->num_ports) {
			dev_err(ds->dev, "port %pOF index %u exceeds num_ports (%u)\n",
				port, reg, ds->num_ports);
			of_node_put(port);
			err = -EINVAL;
			goto out_put_node;
		}

		dp = dsa_to_port(ds, reg);

		err = dsa_port_parse_of(dp, port);
		if (err) {
			of_node_put(port);
			goto out_put_node;
		}
	}

out_put_node:
	of_node_put(ports);
	return err;
}
1498
Vivien Didelot975e6e32017-11-03 19:05:27 -04001499static int dsa_switch_parse_member_of(struct dsa_switch *ds,
1500 struct device_node *dn)
1501{
1502 u32 m[2] = { 0, 0 };
1503 int sz;
1504
1505 /* Don't error out if this optional property isn't found */
1506 sz = of_property_read_variable_u32_array(dn, "dsa,member", m, 2, 2);
1507 if (sz < 0 && sz != -EINVAL)
1508 return sz;
1509
1510 ds->index = m[1];
Vivien Didelot975e6e32017-11-03 19:05:27 -04001511
1512 ds->dst = dsa_tree_touch(m[0]);
1513 if (!ds->dst)
1514 return -ENOMEM;
1515
Vladimir Oltean8674f8d2021-06-21 19:42:14 +03001516 if (dsa_switch_find(ds->dst->index, ds->index)) {
1517 dev_err(ds->dev,
1518 "A DSA switch with index %d already exists in tree %d\n",
1519 ds->index, ds->dst->index);
1520 return -EEXIST;
1521 }
1522
Vladimir Oltean5b22d362021-07-22 18:55:39 +03001523 if (ds->dst->last_switch < ds->index)
1524 ds->dst->last_switch = ds->index;
1525
Vivien Didelot975e6e32017-11-03 19:05:27 -04001526 return 0;
1527}
1528
Vivien Didelotab8ccae2019-10-21 16:51:16 -04001529static int dsa_switch_touch_ports(struct dsa_switch *ds)
1530{
1531 struct dsa_port *dp;
1532 int port;
1533
1534 for (port = 0; port < ds->num_ports; port++) {
1535 dp = dsa_port_touch(ds, port);
1536 if (!dp)
1537 return -ENOMEM;
1538 }
1539
1540 return 0;
1541}
1542
/* Device-tree probe path: resolve tree membership, instantiate the port
 * objects, then parse the per-port child nodes.
 */
static int dsa_switch_parse_of(struct dsa_switch *ds, struct device_node *dn)
{
	int err;

	err = dsa_switch_parse_member_of(ds, dn);
	if (!err)
		err = dsa_switch_touch_ports(ds);
	if (!err)
		err = dsa_switch_parse_ports_of(ds, dn);

	return err;
}
1557
Vivien Didelotfd223e22017-10-27 15:55:14 -04001558static int dsa_port_parse(struct dsa_port *dp, const char *name,
1559 struct device *dev)
1560{
Vivien Didelot6d4e5c52017-10-27 15:55:15 -04001561 if (!strcmp(name, "cpu")) {
Vivien Didelotcbabb0a2017-10-27 15:55:17 -04001562 struct net_device *master;
1563
1564 master = dsa_dev_to_net_device(dev);
1565 if (!master)
1566 return -EPROBE_DEFER;
1567
1568 dev_put(master);
1569
Tobias Waldekranzdeff7102021-04-20 20:53:10 +02001570 return dsa_port_parse_cpu(dp, master, NULL);
Vivien Didelot6d4e5c52017-10-27 15:55:15 -04001571 }
1572
Vivien Didelot06e24d02017-11-03 19:05:29 -04001573 if (!strcmp(name, "dsa"))
1574 return dsa_port_parse_dsa(dp);
Vivien Didelotfd223e22017-10-27 15:55:14 -04001575
Vivien Didelot06e24d02017-11-03 19:05:29 -04001576 return dsa_port_parse_user(dp, name);
Vivien Didelotfd223e22017-10-27 15:55:14 -04001577}
1578
/* Parse the port names from platform data. At least one named port is
 * required, otherwise the switch configuration is rejected.
 */
static int dsa_switch_parse_ports(struct dsa_switch *ds,
				  struct dsa_chip_data *cd)
{
	bool valid_name_found = false;
	struct dsa_port *dp;
	struct device *dev;
	const char *name;
	unsigned int i;
	int err;

	for (i = 0; i < DSA_MAX_PORTS; i++) {
		name = cd->port_names[i];
		dev = cd->netdev[i];
		dp = dsa_to_port(ds, i);

		/* Unnamed entries are simply absent ports */
		if (!name)
			continue;

		err = dsa_port_parse(dp, name, dev);
		if (err)
			return err;

		valid_name_found = true;
	}

	/* NOTE(review): after the loop i == DSA_MAX_PORTS always holds, so
	 * this reduces to !valid_name_found — i.e. no port was named.
	 */
	if (!valid_name_found && i == DSA_MAX_PORTS)
		return -EINVAL;

	return 0;
}
1609
Vivien Didelot975e6e32017-11-03 19:05:27 -04001610static int dsa_switch_parse(struct dsa_switch *ds, struct dsa_chip_data *cd)
Andrew Lunn83c0afa2016-06-04 21:17:07 +02001611{
Vivien Didelotab8ccae2019-10-21 16:51:16 -04001612 int err;
1613
Vivien Didelot975e6e32017-11-03 19:05:27 -04001614 ds->cd = cd;
Andrew Lunn83c0afa2016-06-04 21:17:07 +02001615
Vivien Didelot975e6e32017-11-03 19:05:27 -04001616 /* We don't support interconnected switches nor multiple trees via
1617 * platform data, so this is the unique switch of the tree.
1618 */
1619 ds->index = 0;
1620 ds->dst = dsa_tree_touch(0);
1621 if (!ds->dst)
1622 return -ENOMEM;
Andrew Lunn83c0afa2016-06-04 21:17:07 +02001623
Vivien Didelotab8ccae2019-10-21 16:51:16 -04001624 err = dsa_switch_touch_ports(ds);
1625 if (err)
1626 return err;
1627
Vivien Didelot975e6e32017-11-03 19:05:27 -04001628 return dsa_switch_parse_ports(ds, cd);
Florian Fainelli71e0bbd2017-02-04 13:02:43 -08001629}
1630
Vladimir Oltean6dc43cd2020-01-25 23:01:11 +02001631static void dsa_switch_release_ports(struct dsa_switch *ds)
1632{
Vladimir Oltean6dc43cd2020-01-25 23:01:11 +02001633 struct dsa_port *dp, *next;
1634
Vladimir Oltean65c563a2021-10-20 20:49:51 +03001635 dsa_switch_for_each_port_safe(dp, next, ds) {
Vladimir Oltean6dc43cd2020-01-25 23:01:11 +02001636 list_del(&dp->list);
1637 kfree(dp);
1638 }
1639}
1640
/* Parse a switch from its device tree node or platform data, then set up
 * the tree it belongs to. Returns 0 on success or a negative error code;
 * on failure, any ports allocated during parsing are released here.
 */
static int dsa_switch_probe(struct dsa_switch *ds)
{
	struct dsa_switch_tree *dst;
	struct dsa_chip_data *pdata;
	struct device_node *np;
	int err;

	/* A parent device is mandatory: it carries either the OF node or
	 * the platform data from which the ports are parsed.
	 */
	if (!ds->dev)
		return -ENODEV;

	pdata = ds->dev->platform_data;
	np = ds->dev->of_node;

	if (!ds->num_ports)
		return -EINVAL;

	/* Device tree takes precedence over platform data. Note that in
	 * the -ENODEV branch no ports were allocated, so nothing is
	 * released there.
	 */
	if (np) {
		err = dsa_switch_parse_of(ds, np);
		if (err)
			dsa_switch_release_ports(ds);
	} else if (pdata) {
		err = dsa_switch_parse(ds, pdata);
		if (err)
			dsa_switch_release_ports(ds);
	} else {
		err = -ENODEV;
	}

	if (err)
		return err;

	/* Take a reference on the tree for the lifetime of this switch;
	 * drop it again (together with the ports) if tree setup fails.
	 */
	dst = ds->dst;
	dsa_tree_get(dst);
	err = dsa_tree_setup(dst);
	if (err) {
		dsa_switch_release_ports(ds);
		dsa_tree_put(dst);
	}

	return err;
}
1682
Vivien Didelot23c9ee42017-05-26 18:12:51 -04001683int dsa_register_switch(struct dsa_switch *ds)
Andrew Lunn83c0afa2016-06-04 21:17:07 +02001684{
1685 int err;
1686
1687 mutex_lock(&dsa2_mutex);
Vivien Didelotb4fbb342017-11-06 16:11:53 -05001688 err = dsa_switch_probe(ds);
Vivien Didelot9e741042017-11-24 11:36:06 -05001689 dsa_tree_put(ds->dst);
Andrew Lunn83c0afa2016-06-04 21:17:07 +02001690 mutex_unlock(&dsa2_mutex);
1691
1692 return err;
1693}
1694EXPORT_SYMBOL_GPL(dsa_register_switch);
1695
Vivien Didelotb4fbb342017-11-06 16:11:53 -05001696static void dsa_switch_remove(struct dsa_switch *ds)
Andrew Lunn83c0afa2016-06-04 21:17:07 +02001697{
1698 struct dsa_switch_tree *dst = ds->dst;
Vivien Didelot05f294a2019-10-21 16:51:29 -04001699
Florian Fainellic058f6d2019-11-02 20:13:26 -07001700 dsa_tree_teardown(dst);
Vladimir Oltean6dc43cd2020-01-25 23:01:11 +02001701 dsa_switch_release_ports(ds);
Vivien Didelot8e5cb842019-10-30 22:09:17 -04001702 dsa_tree_put(dst);
Andrew Lunn83c0afa2016-06-04 21:17:07 +02001703}
1704
/* Unregister a switch and tear down its tree. Serialized against
 * dsa_register_switch() and dsa_switch_shutdown() by dsa2_mutex.
 */
void dsa_unregister_switch(struct dsa_switch *ds)
{
	mutex_lock(&dsa2_mutex);
	dsa_switch_remove(ds);
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_unregister_switch);
Vladimir Oltean0650bf52021-09-17 16:34:33 +03001712
/* If the DSA master chooses to unregister its net_device on .shutdown, DSA is
 * blocking that operation from completion, due to the dev_hold taken inside
 * netdev_upper_dev_link. Unlink the DSA slave interfaces from being uppers of
 * the DSA master, so that the system can reboot successfully.
 */
void dsa_switch_shutdown(struct dsa_switch *ds)
{
	struct net_device *master, *slave_dev;
	struct dsa_port *dp;

	mutex_lock(&dsa2_mutex);
	rtnl_lock();

	/* Break the upper/lower relationship between each slave netdev
	 * and its CPU port's master, which releases the dev_hold
	 * mentioned above.
	 */
	dsa_switch_for_each_user_port(dp, ds) {
		master = dp->cpu_dp->master;
		slave_dev = dp->slave;

		netdev_upper_dev_unlink(master, slave_dev);
	}

	/* Disconnect from further netdevice notifiers on the master,
	 * since netdev_uses_dsa() will now return false.
	 */
	dsa_switch_for_each_cpu_port(dp, ds)
		dp->master->dsa_ptr = NULL;

	rtnl_unlock();
	mutex_unlock(&dsa2_mutex);
}
EXPORT_SYMBOL_GPL(dsa_switch_shutdown);