blob: 28e1fbe64ee0dfb8ba5a829c1b69340e75f8d27a [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001/* SPDX-License-Identifier: GPL-2.0-or-later */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00002/*
3 * net/dsa/dsa_priv.h - Hardware switch handling
Lennert Buytenheke84665c2009-03-20 09:52:09 +00004 * Copyright (c) 2008-2009 Marvell Semiconductor
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00005 */
6
7#ifndef __DSA_PRIV_H
8#define __DSA_PRIV_H
9
Vladimir Oltean412a1522020-09-23 14:40:37 -070010#include <linux/if_bridge.h>
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000011#include <linux/phy.h>
Alexander Duyck50753142014-09-15 13:00:19 -040012#include <linux/netdevice.h>
Florian Fainelli04ff53f2015-07-31 11:42:57 -070013#include <linux/netpoll.h>
Vivien Didelotea5dd342017-05-17 15:46:03 -040014#include <net/dsa.h>
Alexander Lobakine131a562020-04-21 16:41:08 +030015#include <net/gro_cells.h>
Alexander Duyck50753142014-09-15 13:00:19 -040016
Vladimir Oltean123abc062021-07-22 18:55:40 +030017#define DSA_MAX_NUM_OFFLOADING_BRIDGES BITS_PER_LONG
18
/* Cross-chip notifier events, dispatched to all switches of a tree (or,
 * for some events, all trees) so each chip can react to a change made on
 * one port. Each event has a matching dsa_notifier_*_info payload below.
 */
enum {
	DSA_NOTIFIER_AGEING_TIME,
	DSA_NOTIFIER_BRIDGE_JOIN,
	DSA_NOTIFIER_BRIDGE_LEAVE,
	DSA_NOTIFIER_FDB_ADD,
	DSA_NOTIFIER_FDB_DEL,
	DSA_NOTIFIER_HOST_FDB_ADD,
	DSA_NOTIFIER_HOST_FDB_DEL,
	DSA_NOTIFIER_HSR_JOIN,
	DSA_NOTIFIER_HSR_LEAVE,
	DSA_NOTIFIER_LAG_CHANGE,
	DSA_NOTIFIER_LAG_JOIN,
	DSA_NOTIFIER_LAG_LEAVE,
	DSA_NOTIFIER_MDB_ADD,
	DSA_NOTIFIER_MDB_DEL,
	DSA_NOTIFIER_HOST_MDB_ADD,
	DSA_NOTIFIER_HOST_MDB_DEL,
	DSA_NOTIFIER_VLAN_ADD,
	DSA_NOTIFIER_VLAN_DEL,
	DSA_NOTIFIER_MTU,
	DSA_NOTIFIER_TAG_PROTO,
	DSA_NOTIFIER_MRP_ADD,
	DSA_NOTIFIER_MRP_DEL,
	DSA_NOTIFIER_MRP_ADD_RING_ROLE,
	DSA_NOTIFIER_MRP_DEL_RING_ROLE,
	DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
	DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
};
47
/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
	unsigned int ageing_time;	/* requested FDB ageing time */
};
52
/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
	struct net_device *br;	/* bridge being joined or left */
	int tree_index;		/* switch tree of the originating port */
	int sw_index;		/* switch index within the tree */
	int port;		/* port index within the switch */
};
60
/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within the switch */
	const unsigned char *addr;	/* MAC address of the FDB entry */
	u16 vid;			/* VLAN of the FDB entry */
};
68
/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
	const struct switchdev_obj_port_mdb *mdb;	/* switchdev MDB object */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within the switch */
};
75
/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
	struct net_device *lag;	/* LAG net_device being joined or left */
	int sw_index;		/* switch index within the tree */
	int port;		/* port index within the switch */

	struct netdev_lag_upper_info *info;	/* upper info from netdev, may be NULL */
};
84
/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
	const struct switchdev_obj_port_vlan *vlan;	/* switchdev VLAN object */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within the switch */
	struct netlink_ext_ack *extack;	/* for reporting errors to user space */
};
92
/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
	bool targeted_match;	/* restrict the notifier to the exact port */
	int sw_index;		/* switch index within the tree */
	int port;		/* port index within the switch */
	int mtu;		/* new MTU to apply */
};
100
/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
	const struct dsa_device_ops *tag_ops;	/* tagging protocol ops to switch to */
};
105
/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_info {
	const struct switchdev_obj_mrp *mrp;	/* switchdev MRP instance object */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within the switch */
};
112
/* DSA_NOTIFIER_MRP_*_RING_ROLE */
struct dsa_notifier_mrp_ring_role_info {
	const struct switchdev_obj_ring_role_mrp *mrp;	/* MRP ring role object */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index within the switch */
};
119
Vladimir Olteanc64b9c02021-07-19 20:14:52 +0300120/* DSA_NOTIFIER_TAG_8021Q_VLAN_* */
121struct dsa_notifier_tag_8021q_vlan_info {
122 int tree_index;
123 int sw_index;
124 int port;
125 u16 vid;
126};
127
/* Deferred-work context for handling SWITCHDEV_FDB_*_TO_DEVICE events */
struct dsa_switchdev_event_work {
	struct dsa_switch *ds;		/* target switch */
	int port;			/* target port index */
	struct net_device *dev;		/* netdevice the event was emitted for */
	struct work_struct work;	/* deferred work item */
	unsigned long event;		/* SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE */
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];	/* MAC address of the FDB entry */
	u16 vid;			/* VLAN of the FDB entry */
	bool host_addr;			/* true if this is a host (CPU) address */
};
141
/* DSA_NOTIFIER_HSR_* */
struct dsa_notifier_hsr_info {
	struct net_device *hsr;	/* HSR net_device being joined or left */
	int sw_index;		/* switch index within the tree */
	int port;		/* port index within the switch */
};
148
/* Private (netdev_priv) data of a DSA user ("slave") network interface */
struct dsa_slave_priv {
	/* Copy of CPU port xmit for faster access in slave transmit hot path */
	struct sk_buff *	(*xmit)(struct sk_buff *skb,
					struct net_device *dev);

	/* GRO cells for the RX path */
	struct gro_cells	gcells;

	/* DSA port data, such as switch, port index, etc. */
	struct dsa_port		*dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll		*netpoll;
#endif

	/* TC context */
	struct list_head	mall_tc_list;
};
166
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000167/* dsa.c */
Andrew Lunnc39e2a12019-04-28 19:37:18 +0200168const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
Andrew Lunn4dad81e2019-04-28 19:37:19 +0200169void dsa_tag_driver_put(const struct dsa_device_ops *ops);
Vladimir Oltean53da0eb2021-01-29 03:00:06 +0200170const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
Andrew Lunnc39e2a12019-04-28 19:37:18 +0200171
Arkadi Sharshevskyc9eb3e02017-08-06 16:15:42 +0300172bool dsa_schedule_work(struct work_struct *work);
Florian Fainelli98cdb482018-09-07 11:09:02 -0700173const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000174
Vladimir Oltean4e500252021-06-11 22:01:24 +0300175static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
176{
177 return ops->needed_headroom + ops->needed_tailroom;
178}
179
Vivien Didelotf2f23562017-09-19 11:57:00 -0400180/* master.c */
Vivien Didelot17a22fc2017-11-06 16:11:45 -0500181int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
182void dsa_master_teardown(struct net_device *dev);
Vivien Didelotf2f23562017-09-19 11:57:00 -0400183
Vivien Didelot2231c432017-10-16 11:12:17 -0400184static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
185 int device, int port)
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400186{
Vivien Didelot2f657a62017-09-29 17:19:20 -0400187 struct dsa_port *cpu_dp = dev->dsa_ptr;
188 struct dsa_switch_tree *dst = cpu_dp->dst;
Vivien Didelot7b9a2f42019-10-21 16:51:18 -0400189 struct dsa_port *dp;
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400190
Vivien Didelot7b9a2f42019-10-21 16:51:18 -0400191 list_for_each_entry(dp, &dst->ports, list)
192 if (dp->ds->index == device && dp->index == port &&
193 dp->type == DSA_PORT_TYPE_USER)
194 return dp->slave;
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400195
Vivien Didelot7b9a2f42019-10-21 16:51:18 -0400196 return NULL;
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400197}
198
Vivien Didelota40c1752017-05-19 17:00:44 -0400199/* port.c */
Vladimir Oltean53da0eb2021-01-29 03:00:06 +0200200void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
201 const struct dsa_device_ops *tag_ops);
Vladimir Oltean39f32102021-08-08 14:16:37 +0300202int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age);
Russell King8640f8d2020-03-03 15:01:46 +0000203int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
Vivien Didelotfb8a6a22017-09-22 19:01:56 -0400204int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
Russell King8640f8d2020-03-03 15:01:46 +0000205void dsa_port_disable_rt(struct dsa_port *dp);
Andrew Lunn75104db2019-02-24 20:44:43 +0100206void dsa_port_disable(struct dsa_port *dp);
Vladimir Oltean2afc5262021-03-23 01:51:48 +0200207int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
208 struct netlink_ext_ack *extack);
Vladimir Oltean4e51bf42021-07-21 19:24:03 +0300209void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br);
Vivien Didelotcfbed322017-05-19 17:00:45 -0400210void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100211int dsa_port_lag_change(struct dsa_port *dp,
212 struct netdev_lag_lower_state_info *linfo);
213int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
Vladimir Oltean2afc5262021-03-23 01:51:48 +0200214 struct netdev_lag_upper_info *uinfo,
215 struct netlink_ext_ack *extack);
Vladimir Oltean4e51bf42021-07-21 19:24:03 +0300216void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100217void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
Vladimir Oltean89153ed2021-02-13 22:43:19 +0200218int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
219 struct netlink_ext_ack *extack);
Russell King54a0ed02020-05-12 20:20:25 +0300220bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
Vladimir Olteanbae33f22021-01-09 02:01:50 +0200221int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
Vladimir Olteanbfcb8132020-03-27 21:55:42 +0200222int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
Vladimir Oltean88faba22021-06-21 19:42:18 +0300223 bool targeted_match);
Arkadi Sharshevsky2acf4e62017-08-06 16:15:41 +0300224int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
225 u16 vid);
226int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
227 u16 vid);
Vladimir Oltean3dc80af2021-06-29 17:06:51 +0300228int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
229 u16 vid);
230int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
231 u16 vid);
Vivien Didelotde40fc52017-09-20 19:32:14 -0400232int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
Andrew Lunnbb9f603172017-11-09 23:11:01 +0100233int dsa_port_mdb_add(const struct dsa_port *dp,
Vladimir Olteanffb68fc2021-01-09 02:01:48 +0200234 const struct switchdev_obj_port_mdb *mdb);
Andrew Lunnbb9f603172017-11-09 23:11:01 +0100235int dsa_port_mdb_del(const struct dsa_port *dp,
Vivien Didelot3a9afea2017-05-19 17:00:49 -0400236 const struct switchdev_obj_port_mdb *mdb);
Vladimir Olteanb8e997c2021-06-29 17:06:49 +0300237int dsa_port_host_mdb_add(const struct dsa_port *dp,
238 const struct switchdev_obj_port_mdb *mdb);
239int dsa_port_host_mdb_del(const struct dsa_port *dp,
240 const struct switchdev_obj_port_mdb *mdb);
Vladimir Olteane18f4c12021-02-12 17:15:55 +0200241int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
Vladimir Olteana8b659e2021-02-12 17:15:56 +0200242 struct switchdev_brport_flags flags,
243 struct netlink_ext_ack *extack);
Vladimir Oltean045c45d2021-08-08 17:35:23 +0300244int dsa_port_bridge_flags(struct dsa_port *dp,
Vladimir Olteana8b659e2021-02-12 17:15:56 +0200245 struct switchdev_brport_flags flags,
246 struct netlink_ext_ack *extack);
Vivien Didelot076e7132017-05-19 17:00:50 -0400247int dsa_port_vlan_add(struct dsa_port *dp,
Vladimir Oltean31046a52021-02-13 22:43:18 +0200248 const struct switchdev_obj_port_vlan *vlan,
249 struct netlink_ext_ack *extack);
Vivien Didelot076e7132017-05-19 17:00:50 -0400250int dsa_port_vlan_del(struct dsa_port *dp,
251 const struct switchdev_obj_port_vlan *vlan);
Horatiu Vulturc595c432021-02-16 22:42:04 +0100252int dsa_port_mrp_add(const struct dsa_port *dp,
253 const struct switchdev_obj_mrp *mrp);
254int dsa_port_mrp_del(const struct dsa_port *dp,
255 const struct switchdev_obj_mrp *mrp);
256int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
257 const struct switchdev_obj_ring_role_mrp *mrp);
258int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
259 const struct switchdev_obj_ring_role_mrp *mrp);
Sebastian Reichel33615362018-01-23 16:03:46 +0100260int dsa_port_link_register_of(struct dsa_port *dp);
261void dsa_port_link_unregister_of(struct dsa_port *dp);
George McCollister18596f52021-02-09 19:02:12 -0600262int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
263void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
Vladimir Olteanc64b9c02021-07-19 20:14:52 +0300264int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid);
265void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid);
Ioana Ciornei77373d42019-05-28 20:38:15 +0300266extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
Vivien Didelot57ab1ca2017-10-26 10:50:07 -0400267
Vladimir Oltean03cbb872021-03-07 12:21:56 +0200268static inline bool dsa_port_offloads_bridge_port(struct dsa_port *dp,
Vladimir Olteanb94dc992021-07-19 16:51:40 +0300269 const struct net_device *dev)
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100270{
Tobias Waldekranzcc76ce92021-03-18 20:25:33 +0100271 return dsa_port_to_bridge_port(dp) == dev;
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100272}
273
Vladimir Oltean03cbb872021-03-07 12:21:56 +0200274static inline bool dsa_port_offloads_bridge(struct dsa_port *dp,
Vladimir Olteanb94dc992021-07-19 16:51:40 +0300275 const struct net_device *bridge_dev)
Vladimir Oltean03cbb872021-03-07 12:21:56 +0200276{
277 /* DSA ports connected to a bridge, and event was emitted
278 * for the bridge.
279 */
280 return dp->bridge_dev == bridge_dev;
281}
282
Vladimir Olteana324d3d2021-02-06 00:02:20 +0200283/* Returns true if any port of this tree offloads the given net_device */
Vladimir Oltean03cbb872021-03-07 12:21:56 +0200284static inline bool dsa_tree_offloads_bridge_port(struct dsa_switch_tree *dst,
Vladimir Olteanb94dc992021-07-19 16:51:40 +0300285 const struct net_device *dev)
Vladimir Olteana324d3d2021-02-06 00:02:20 +0200286{
287 struct dsa_port *dp;
288
289 list_for_each_entry(dp, &dst->ports, list)
Vladimir Oltean03cbb872021-03-07 12:21:56 +0200290 if (dsa_port_offloads_bridge_port(dp, dev))
Vladimir Olteana324d3d2021-02-06 00:02:20 +0200291 return true;
292
293 return false;
294}
295
Vladimir Olteanb94dc992021-07-19 16:51:40 +0300296/* Returns true if any port of this tree offloads the given bridge */
297static inline bool dsa_tree_offloads_bridge(struct dsa_switch_tree *dst,
298 const struct net_device *bridge_dev)
299{
300 struct dsa_port *dp;
301
302 list_for_each_entry(dp, &dst->ports, list)
303 if (dsa_port_offloads_bridge(dp, bridge_dev))
304 return true;
305
306 return false;
307}
308
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000309/* slave.c */
Alexander Duyck50753142014-09-15 13:00:19 -0400310extern const struct dsa_device_ops notag_netdev_ops;
Vladimir Oltean010e2692021-03-23 01:51:50 +0200311extern struct notifier_block dsa_slave_switchdev_notifier;
312extern struct notifier_block dsa_slave_switchdev_blocking_notifier;
313
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000314void dsa_slave_mii_bus_init(struct dsa_switch *ds);
Vivien Didelot951259aa2017-10-27 15:55:19 -0400315int dsa_slave_create(struct dsa_port *dp);
Neil Armstrongcda5c152015-12-07 13:57:35 +0100316void dsa_slave_destroy(struct net_device *slave_dev);
Florian Fainelli24462542014-09-18 17:31:22 -0700317int dsa_slave_suspend(struct net_device *slave_dev);
318int dsa_slave_resume(struct net_device *slave_dev);
Vivien Didelot88e4f0c2017-02-03 13:20:16 -0500319int dsa_slave_register_notifier(void);
320void dsa_slave_unregister_notifier(void);
Vladimir Oltean53da0eb2021-01-29 03:00:06 +0200321void dsa_slave_setup_tagger(struct net_device *slave);
322int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000323
Vivien Didelotd9450972017-10-16 11:12:15 -0400324static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
325{
326 struct dsa_slave_priv *p = netdev_priv(dev);
327
328 return p->dp;
329}
330
Vivien Didelotd0006b02017-10-16 11:12:16 -0400331static inline struct net_device *
332dsa_slave_to_master(const struct net_device *dev)
333{
334 struct dsa_port *dp = dsa_slave_to_port(dev);
335
Vivien Didelotf8b8b1c2017-10-16 11:12:18 -0400336 return dp->cpu_dp->master;
Vivien Didelotd0006b02017-10-16 11:12:16 -0400337}
338
/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 * Returns the (possibly modified) skb, or NULL if skb_vlan_untag() failed
 * and consumed it.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);
	struct net_device *br = dp->bridge_dev;
	struct net_device *dev = skb->dev;
	struct net_device *upper_dev;
	u16 vid, pvid, proto;
	int err;

	/* Not bridged, or the bridge is VLAN-aware and untags by itself */
	if (!br || br_vlan_enabled(br))
		return skb;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return skb;

	/* Move VLAN tag from data to hwaccel */
	if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
		skb = skb_vlan_untag(skb);
		if (!skb)
			return NULL;
	}

	/* Untagged frame: nothing to strip */
	if (!skb_vlan_tag_present(skb))
		return skb;

	vid = skb_vlan_tag_get_id(skb);

	/* We already run under an RCU read-side critical section since
	 * we are called from netif_receive_skb_list_internal().
	 */
	err = br_vlan_get_pvid_rcu(dev, &pvid);
	if (err)
		return skb;

	/* Only the pvid must be sent untagged towards the bridge */
	if (vid != pvid)
		return skb;

	/* The sad part about attempting to untag from DSA is that we
	 * don't know, unless we check, if the skb will end up in
	 * the bridge's data path - br_allowed_ingress() - or not.
	 * For example, there might be an 8021q upper for the
	 * default_pvid of the bridge, which will steal VLAN-tagged traffic
	 * from the bridge's data path. This is a configuration that DSA
	 * supports because vlan_filtering is 0. In that case, we should
	 * definitely keep the tag, to make sure it keeps working.
	 */
	upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
	if (upper_dev)
		return skb;

	__vlan_hwaccel_clear_tag(skb);

	return skb;
}
397
/* For switches without hardware support for DSA tagging to be able
 * to support termination through the bridge.
 * Walks all ports of the tree behind @master and returns the first
 * bridged user netdevice that is a member of @vid, or NULL if none
 * qualifies.
 */
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct bridge_vlan_info vinfo;
	struct net_device *slave;
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		/* Only user ports can terminate through a bridge */
		if (dp->type != DSA_PORT_TYPE_USER)
			continue;

		/* Port must actually be bridged */
		if (!dp->bridge_dev)
			continue;

		/* Skip ports whose STP state would drop the frame anyway */
		if (dp->stp_state != BR_STATE_LEARNING &&
		    dp->stp_state != BR_STATE_FORWARDING)
			continue;

		/* Since the bridge might learn this packet, keep the CPU port
		 * affinity with the port that will be used for the reply on
		 * xmit.
		 */
		if (dp->cpu_dp != cpu_dp)
			continue;

		slave = dp->slave;

		/* Candidate must be a member of @vid on the bridge */
		err = br_vlan_get_info_rcu(slave, vid, &vinfo);
		if (err)
			continue;

		return slave;
	}

	return NULL;
}
440
Vladimir Olteanbea79072021-07-29 17:56:00 +0300441/* If the ingress port offloads the bridge, we mark the frame as autonomously
442 * forwarded by hardware, so the software bridge doesn't forward in twice, back
443 * to us, because we already did. However, if we're in fallback mode and we do
444 * software bridging, we are not offloading it, therefore the dp->bridge_dev
445 * pointer is not populated, and flooding needs to be done by software (we are
446 * effectively operating in standalone ports mode).
447 */
448static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
449{
450 struct dsa_port *dp = dsa_slave_to_port(skb->dev);
451
452 skb->offload_fwd_mark = !!(dp->bridge_dev);
453}
454
/* Helper for removing DSA header tags from packets in the RX path.
 * Must not be called before skb_pull(len).
 * skb->data
 *                                 |
 *                                 v
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 *                                 |               |
 * <----- len ----->               <----- len ----->
 * |
 * >>>>>>>   v
 * >>>>>>>   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * >>>>>>>   +-----------------------+-----------------------+-------+
 * >>>>>>>   |    Destination MAC    |      Source MAC       | EType |
 *           +-----------------------+-----------------------+-------+
 *           ^
 *           |
 *           skb->data
 */
static inline void dsa_strip_etype_header(struct sk_buff *skb, int len)
{
	/* Slide both MAC addresses up over the tag; regions overlap, so
	 * memmove (not memcpy) is required.
	 */
	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN);
}
480
/* Helper for creating space for DSA header tags in TX path packets.
 * Must not be called before skb_push(len).
 *
 * Before:
 *
 *       <<<<<<<   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * ^     <<<<<<<   +-----------------------+-----------------------+-------+
 * |     <<<<<<<   |    Destination MAC    |      Source MAC       | EType |
 * |               +-----------------------+-----------------------+-------+
 * <----- len ----->
 * |
 * |
 * skb->data
 *
 * After:
 *
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 * ^                                               |               |
 * |                                               <----- len ----->
 * skb->data
 */
static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
{
	/* Slide both MAC addresses down into the pushed headroom, opening a
	 * len-byte gap for the tag; regions overlap, so memmove is required.
	 */
	memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}
509
Vivien Didelotf515f192017-02-03 13:20:20 -0500510/* switch.c */
511int dsa_switch_register_notifier(struct dsa_switch *ds);
512void dsa_switch_unregister_notifier(struct dsa_switch *ds);
Vladimir Olteanbff33f72020-03-27 21:55:43 +0200513
514/* dsa2.c */
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100515void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
516void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
Vladimir Oltean886f8e22021-01-29 03:00:04 +0200517int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
518int dsa_broadcast(unsigned long e, void *v);
Vladimir Oltean53da0eb2021-01-29 03:00:06 +0200519int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
520 struct net_device *master,
521 const struct dsa_device_ops *tag_ops,
522 const struct dsa_device_ops *old_tag_ops);
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100523
Vladimir Olteane19cc132021-07-19 20:14:51 +0300524/* tag_8021q.c */
525int dsa_tag_8021q_bridge_join(struct dsa_switch *ds,
526 struct dsa_notifier_bridge_info *info);
527int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds,
528 struct dsa_notifier_bridge_info *info);
Vladimir Olteanc64b9c02021-07-19 20:14:52 +0300529int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
530 struct dsa_notifier_tag_8021q_vlan_info *info);
531int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
532 struct dsa_notifier_tag_8021q_vlan_info *info);
Vladimir Olteane19cc132021-07-19 20:14:51 +0300533
Vladimir Olteanbff33f72020-03-27 21:55:43 +0200534extern struct list_head dsa_tree_list;
535
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000536#endif