/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * net/dsa/dsa_priv.h - Hardware switch handling
 * Copyright (c) 2008-2009 Marvell Semiconductor
 */

#ifndef __DSA_PRIV_H
#define __DSA_PRIV_H

#include <linux/if_bridge.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <net/dsa.h>
#include <net/gro_cells.h>

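/* Cross-chip notifier events. port.c wraps most of these in one of the
 * dsa_notifier_*_info structures below and passes them to dsa_tree_notify()
 * or dsa_broadcast() (see the dsa2.c prototypes at the end of this file),
 * so that every switch in the tree can react, not just the targeted port.
 */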
enum {
	DSA_NOTIFIER_AGEING_TIME,
	DSA_NOTIFIER_BRIDGE_JOIN,
	DSA_NOTIFIER_BRIDGE_LEAVE,
	DSA_NOTIFIER_FDB_ADD,
	DSA_NOTIFIER_FDB_DEL,
	DSA_NOTIFIER_HOST_FDB_ADD,
	DSA_NOTIFIER_HOST_FDB_DEL,
	DSA_NOTIFIER_HSR_JOIN,
	DSA_NOTIFIER_HSR_LEAVE,
	DSA_NOTIFIER_LAG_CHANGE,
	DSA_NOTIFIER_LAG_JOIN,
	DSA_NOTIFIER_LAG_LEAVE,
	DSA_NOTIFIER_MDB_ADD,
	DSA_NOTIFIER_MDB_DEL,
	DSA_NOTIFIER_HOST_MDB_ADD,
	DSA_NOTIFIER_HOST_MDB_DEL,
	DSA_NOTIFIER_VLAN_ADD,
	DSA_NOTIFIER_VLAN_DEL,
	DSA_NOTIFIER_MTU,
	DSA_NOTIFIER_TAG_PROTO,
	DSA_NOTIFIER_MRP_ADD,
	DSA_NOTIFIER_MRP_DEL,
	DSA_NOTIFIER_MRP_ADD_RING_ROLE,
	DSA_NOTIFIER_MRP_DEL_RING_ROLE,
};

/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
	unsigned int ageing_time;
};

/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
	struct net_device *br;
	int tree_index;
	int sw_index;
	int port;
};

/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
	int sw_index;
	int port;
	const unsigned char *addr;
	u16 vid;
};

/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
	const struct switchdev_obj_port_mdb *mdb;
	int sw_index;
	int port;
};

/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
	struct net_device *lag;
	int sw_index;
	int port;

	struct netdev_lag_upper_info *info;
};

/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
	const struct switchdev_obj_port_vlan *vlan;
	int sw_index;
	int port;
	struct netlink_ext_ack *extack;
};

/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
	bool targeted_match;
	int sw_index;
	int port;
	int mtu;
};

/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
	const struct dsa_device_ops *tag_ops;
};

/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_info {
	const struct switchdev_obj_mrp *mrp;
	int sw_index;
	int port;
};

/* DSA_NOTIFIER_MRP_*_RING_ROLE */
struct dsa_notifier_mrp_ring_role_info {
	const struct switchdev_obj_ring_role_mrp *mrp;
	int sw_index;
	int port;
};

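/* Deferred work for the slave switchdev notifier: FDB add/del events arrive
 * in atomic context, so the MAC address and VID are copied here and the
 * switch is programmed later from a workqueue (see dsa_schedule_work()).
 */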
struct dsa_switchdev_event_work {
	struct dsa_switch *ds;
	int port;
	struct net_device *dev;
	struct work_struct work;
	unsigned long event;
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];
	u16 vid;
	bool host_addr;
};

/* DSA_NOTIFIER_HSR_* */
struct dsa_notifier_hsr_info {
	struct net_device *hsr;
	int sw_index;
	int port;
};

struct dsa_slave_priv {
	/* Copy of CPU port xmit for faster access in slave transmit hot path */
	struct sk_buff * (*xmit)(struct sk_buff *skb,
				 struct net_device *dev);

	struct gro_cells gcells;

	/* DSA port data, such as switch, port index, etc. */
	struct dsa_port *dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *netpoll;
#endif

	/* TC context */
	struct list_head mall_tc_list;
};

/* dsa.c */
const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);

bool dsa_schedule_work(struct work_struct *work);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);

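/* Combined headroom and tailroom, in bytes, that a tagging protocol needs
 * on every transmitted skb.
 */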
static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
{
	return ops->needed_headroom + ops->needed_tailroom;
}

/* master.c */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);

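/* Map a (switch index, port index) pair demuxed from a frame's tag back to
 * the slave net_device behind this DSA master, or NULL if there is none.
 */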
static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
							int device, int port)
{
	struct dsa_port *cpu_dp = dev->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dp->ds->index == device && dp->index == port &&
		    dp->type == DSA_PORT_TYPE_USER)
			return dp->slave;

	return NULL;
}

/* port.c */
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops);
int dsa_port_set_state(struct dsa_port *dp, u8 state);
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack);
int dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br,
			      struct netlink_ext_ack *extack);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo);
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack);
int dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev,
			   struct netlink_ext_ack *extack);
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack);
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool targeted_match);
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid);
int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid);
int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_host_mdb_add(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb);
int dsa_port_host_mdb_del(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack);
int dsa_port_bridge_flags(const struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack);
int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
		     struct netlink_ext_ack *extack);
int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack);
int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan);
int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;

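/* True if the given net_device is the bridge port backed by this DSA port
 * (the slave itself, or its LAG upper when the port is bonded).
 */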
static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
						 struct net_device *dev)
{
	return dsa_port_to_bridge_port(dp) == dev;
}

static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
					    struct net_device *bridge_dev)
{
	/* DSA ports connected to a bridge, and event was emitted
	 * for the bridge.
	 */
	return dp->bridge_dev == bridge_dev;
}

/* Returns true if any port of this tree offloads the given net_device */
static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
						 struct net_device *dev)
{
	struct dsa_port *dp;

	list_for_each_entry(dp, &dst->ports, list)
		if (dsa_port_offloads_bridge_port(dp, dev))
			return true;

	return false;
}

/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
extern struct notifier_block dsa_slave_switchdev_notifier;
extern struct notifier_block dsa_slave_switchdev_blocking_notifier;

void dsa_slave_mii_bus_init(struct dsa_switch *ds);
int dsa_slave_create(struct dsa_port *dp);
void dsa_slave_destroy(struct net_device *slave_dev);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
int dsa_slave_register_notifier(void);
void dsa_slave_unregister_notifier(void);
void dsa_slave_setup_tagger(struct net_device *slave);
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);

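/* Retrieve the DSA port backing a slave net_device */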
static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
{
	struct dsa_slave_priv *p = netdev_priv(dev);

	return p->dp;
}

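/* Retrieve the DSA master (host interface) that carries this slave's traffic */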
static inline struct net_device *
dsa_slave_to_master(const struct net_device *dev)
{
	struct dsa_port *dp = dsa_slave_to_port(dev);

	return dp->cpu_dp->master;
}

/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);
	struct net_device *br = dp->bridge_dev;
	struct net_device *dev = skb->dev;
	struct net_device *upper_dev;
	u16 vid, pvid, proto;
	int err;

	if (!br || br_vlan_enabled(br))
		return skb;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return skb;

	/* Move VLAN tag from data to hwaccel */
	if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
		skb = skb_vlan_untag(skb);
		if (!skb)
			return NULL;
	}

	if (!skb_vlan_tag_present(skb))
		return skb;

	vid = skb_vlan_tag_get_id(skb);

	/* We already run under an RCU read-side critical section since
	 * we are called from netif_receive_skb_list_internal().
	 */
	err = br_vlan_get_pvid_rcu(dev, &pvid);
	if (err)
		return skb;

	if (vid != pvid)
		return skb;

	/* The sad part about attempting to untag from DSA is that we
	 * don't know, unless we check, if the skb will end up in
	 * the bridge's data path - br_allowed_ingress() - or not.
	 * For example, there might be an 8021q upper for the
	 * default_pvid of the bridge, which will steal VLAN-tagged traffic
	 * from the bridge's data path. This is a configuration that DSA
	 * supports because vlan_filtering is 0. In that case, we should
	 * definitely keep the tag, to make sure it keeps working.
	 */
	upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
	if (upper_dev)
		return skb;

	__vlan_hwaccel_clear_tag(skb);

	return skb;
}

/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);

/* dsa2.c */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
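/* Change the tagging protocol of an entire tree at runtime. Every switch is
 * notified through DSA_NOTIFIER_TAG_PROTO; if one of them rejects the new
 * tagger, the old_tag_ops are restored.
 */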
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops);

extern struct list_head dsa_tree_list;

#endif