blob: 760306f0012fde416556227717965b44269279b1 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001/* SPDX-License-Identifier: GPL-2.0-or-later */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00002/*
3 * net/dsa/dsa_priv.h - Hardware switch handling
Lennert Buytenheke84665c2009-03-20 09:52:09 +00004 * Copyright (c) 2008-2009 Marvell Semiconductor
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00005 */
6
7#ifndef __DSA_PRIV_H
8#define __DSA_PRIV_H
9
Vladimir Oltean412a1522020-09-23 14:40:37 -070010#include <linux/if_bridge.h>
Jakub Kicinskib6459412021-12-28 16:49:13 -080011#include <linux/if_vlan.h>
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000012#include <linux/phy.h>
Alexander Duyck50753142014-09-15 13:00:19 -040013#include <linux/netdevice.h>
Florian Fainelli04ff53f2015-07-31 11:42:57 -070014#include <linux/netpoll.h>
Vivien Didelotea5dd342017-05-17 15:46:03 -040015#include <net/dsa.h>
Alexander Lobakine131a562020-04-21 16:41:08 +030016#include <net/gro_cells.h>
Alexander Duyck50753142014-09-15 13:00:19 -040017
/* Upper bound on TX-forwarding-offload bridges per tree; presumably bounded
 * by an unsigned long bitmap of bridge numbers — confirm against dsa2.c.
 */
#define DSA_MAX_NUM_OFFLOADING_BRIDGES		BITS_PER_LONG
19
/* Events multiplexed over the DSA cross-chip notifier chain (delivered via
 * dsa_tree_notify()/dsa_broadcast(), handled in switch.c). Each event carries
 * the matching dsa_notifier_*_info structure declared below.
 */
enum {
	DSA_NOTIFIER_AGEING_TIME,
	DSA_NOTIFIER_BRIDGE_JOIN,
	DSA_NOTIFIER_BRIDGE_LEAVE,
	DSA_NOTIFIER_FDB_ADD,
	DSA_NOTIFIER_FDB_DEL,
	DSA_NOTIFIER_HOST_FDB_ADD,
	DSA_NOTIFIER_HOST_FDB_DEL,
	DSA_NOTIFIER_LAG_CHANGE,
	DSA_NOTIFIER_LAG_JOIN,
	DSA_NOTIFIER_LAG_LEAVE,
	DSA_NOTIFIER_MDB_ADD,
	DSA_NOTIFIER_MDB_DEL,
	DSA_NOTIFIER_HOST_MDB_ADD,
	DSA_NOTIFIER_HOST_MDB_DEL,
	DSA_NOTIFIER_VLAN_ADD,
	DSA_NOTIFIER_VLAN_DEL,
	DSA_NOTIFIER_MTU,
	DSA_NOTIFIER_TAG_PROTO,
	DSA_NOTIFIER_TAG_PROTO_CONNECT,
	DSA_NOTIFIER_TAG_PROTO_DISCONNECT,
	DSA_NOTIFIER_TAG_8021Q_VLAN_ADD,
	DSA_NOTIFIER_TAG_8021Q_VLAN_DEL,
};
44
/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
	/* Requested FDB ageing time; units not visible here — presumably
	 * milliseconds, confirm against the dsa_port_ageing_time() caller.
	 */
	unsigned int ageing_time;
};
49
/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
	/* Bridge being joined or left */
	struct dsa_bridge bridge;
	/* Identity of the originating port within the fabric */
	int tree_index;
	int sw_index;
	int port;
	/* Out-parameter: whether the switch offloads TX forwarding */
	bool tx_fwd_offload;
};
58
/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
	/* Originating switch index and port number */
	int sw_index;
	int port;
	/* MAC address and VLAN of the FDB entry */
	const unsigned char *addr;
	u16 vid;
};
66
/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
	/* switchdev multicast database entry being added/deleted */
	const struct switchdev_obj_port_mdb *mdb;
	/* Originating switch index and port number */
	int sw_index;
	int port;
};
73
/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
	/* LAG netdevice the port is joining/leaving */
	struct net_device *lag;
	/* Originating switch index and port number */
	int sw_index;
	int port;

	/* Upper-device info from the netdev notifier; NULL on leave —
	 * TODO confirm against the switch.c handler.
	 */
	struct netdev_lag_upper_info *info;
};
82
/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
	/* switchdev VLAN object being added/deleted */
	const struct switchdev_obj_port_vlan *vlan;
	/* Originating switch index and port number */
	int sw_index;
	int port;
	/* For reporting errors back to user space */
	struct netlink_ext_ack *extack;
};
90
/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
	/* Whether only the exact (sw_index, port) pair should react, as
	 * opposed to e.g. CPU/DSA links too — confirm in switch.c.
	 */
	bool targeted_match;
	int sw_index;
	int port;
	/* New MTU in bytes */
	int mtu;
};
98
/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
	/* Tagging protocol operations being switched to/connected */
	const struct dsa_device_ops *tag_ops;
};
103
/* DSA_NOTIFIER_TAG_8021Q_VLAN_* */
struct dsa_notifier_tag_8021q_vlan_info {
	/* Identity of the originating port within the fabric */
	int tree_index;
	int sw_index;
	int port;
	/* tag_8021q VLAN ID being installed/removed */
	u16 vid;
};
111
/* Context for an FDB switchdev event, presumably deferred to process context
 * via dsa_schedule_work() — confirm against the slave.c event handler.
 */
struct dsa_switchdev_event_work {
	struct dsa_switch *ds;
	int port;
	/* Netdevice the event was originally targeted at */
	struct net_device *dev;
	struct work_struct work;
	/* SWITCHDEV_FDB_{ADD,DEL}_TO_DEVICE event code */
	unsigned long event;
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];
	u16 vid;
	/* True when the address is a host (CPU-terminated) address */
	bool host_addr;
};
125
/* netdev_priv() area of a DSA user ("slave") network device */
struct dsa_slave_priv {
	/* Copy of CPU port xmit for faster access in slave transmit hot path */
	struct sk_buff * (*xmit)(struct sk_buff *skb,
				 struct net_device *dev);

	/* Per-device gro_cells RX context (see <net/gro_cells.h>) */
	struct gro_cells gcells;

	/* DSA port data, such as switch, port index, etc. */
	struct dsa_port *dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *netpoll;
#endif

	/* TC context */
	struct list_head mall_tc_list;
};
143
/* dsa.c */
/* Tagging driver lookup by protocol ID or name; _get takes a reference
 * that must be released with dsa_tag_driver_put().
 */
const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
void dsa_tag_driver_put(const struct dsa_device_ops *ops);
const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);

bool dsa_schedule_work(struct work_struct *work);
void dsa_flush_workqueue(void);
const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000152
Vladimir Oltean4e500252021-06-11 22:01:24 +0300153static inline int dsa_tag_protocol_overhead(const struct dsa_device_ops *ops)
154{
155 return ops->needed_headroom + ops->needed_tailroom;
156}
157
/* master.c */
/* Attach/detach a CPU port to its DSA master (host) netdevice */
int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
void dsa_master_teardown(struct net_device *dev);
Vivien Didelotf2f23562017-09-19 11:57:00 -0400161
Vivien Didelot2231c432017-10-16 11:12:17 -0400162static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
163 int device, int port)
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400164{
Vivien Didelot2f657a62017-09-29 17:19:20 -0400165 struct dsa_port *cpu_dp = dev->dsa_ptr;
166 struct dsa_switch_tree *dst = cpu_dp->dst;
Vivien Didelot7b9a2f42019-10-21 16:51:18 -0400167 struct dsa_port *dp;
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400168
Vivien Didelot7b9a2f42019-10-21 16:51:18 -0400169 list_for_each_entry(dp, &dst->ports, list)
170 if (dp->ds->index == device && dp->index == port &&
171 dp->type == DSA_PORT_TYPE_USER)
172 return dp->slave;
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400173
Vivien Didelot7b9a2f42019-10-21 16:51:18 -0400174 return NULL;
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400175}
176
/* port.c
 * Per-port operations: STP state, bridge/LAG/HSR membership, FDB/MDB/VLAN
 * programming, MRP, MTU and link management.
 */
void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
			       const struct dsa_device_ops *tag_ops);
int dsa_port_set_state(struct dsa_port *dp, u8 state, bool do_fast_age);
/* _rt variants are callable from contexts where rtnl is already held —
 * confirm locking contract in port.c.
 */
int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
void dsa_port_disable_rt(struct dsa_port *dp);
void dsa_port_disable(struct dsa_port *dp);
int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br,
			 struct netlink_ext_ack *extack);
void dsa_port_pre_bridge_leave(struct dsa_port *dp, struct net_device *br);
void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
int dsa_port_lag_change(struct dsa_port *dp,
			struct netdev_lag_lower_state_info *linfo);
int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
		      struct netdev_lag_upper_info *uinfo,
		      struct netlink_ext_ack *extack);
void dsa_port_pre_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
			    struct netlink_ext_ack *extack);
bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
			bool targeted_match);
/* Plain FDB/MDB entries target the port itself; host_* variants target the
 * CPU port so the address is terminated by the host.
 */
int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid);
int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
		     u16 vid);
int dsa_port_host_fdb_add(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid);
int dsa_port_host_fdb_del(struct dsa_port *dp, const unsigned char *addr,
			  u16 vid);
int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
int dsa_port_mdb_add(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_mdb_del(const struct dsa_port *dp,
		     const struct switchdev_obj_port_mdb *mdb);
int dsa_port_host_mdb_add(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb);
int dsa_port_host_mdb_del(const struct dsa_port *dp,
			  const struct switchdev_obj_port_mdb *mdb);
int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
			      struct switchdev_brport_flags flags,
			      struct netlink_ext_ack *extack);
int dsa_port_bridge_flags(struct dsa_port *dp,
			  struct switchdev_brport_flags flags,
			  struct netlink_ext_ack *extack);
int dsa_port_vlan_add(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan,
		      struct netlink_ext_ack *extack);
int dsa_port_vlan_del(struct dsa_port *dp,
		      const struct switchdev_obj_port_vlan *vlan);
int dsa_port_mrp_add(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_del(const struct dsa_port *dp,
		     const struct switchdev_obj_mrp *mrp);
int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
			       const struct switchdev_obj_ring_role_mrp *mrp);
int dsa_port_phylink_create(struct dsa_port *dp);
int dsa_port_link_register_of(struct dsa_port *dp);
void dsa_port_link_unregister_of(struct dsa_port *dp);
int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
int dsa_port_tag_8021q_vlan_add(struct dsa_port *dp, u16 vid, bool broadcast);
void dsa_port_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid, bool broadcast);
Vivien Didelot57ab1ca2017-10-26 10:50:07 -0400245
/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
extern struct notifier_block dsa_slave_switchdev_notifier;
extern struct notifier_block dsa_slave_switchdev_blocking_notifier;

void dsa_slave_mii_bus_init(struct dsa_switch *ds);
/* Create/destroy the user netdevice backing a DSA port */
int dsa_slave_create(struct dsa_port *dp);
void dsa_slave_destroy(struct net_device *slave_dev);
int dsa_slave_suspend(struct net_device *slave_dev);
int dsa_slave_resume(struct net_device *slave_dev);
int dsa_slave_register_notifier(void);
void dsa_slave_unregister_notifier(void);
void dsa_slave_setup_tagger(struct net_device *slave);
int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
int dsa_slave_manage_vlan_filtering(struct net_device *dev,
				    bool vlan_filtering);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000262
Vivien Didelotd9450972017-10-16 11:12:15 -0400263static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
264{
265 struct dsa_slave_priv *p = netdev_priv(dev);
266
267 return p->dp;
268}
269
Vivien Didelotd0006b02017-10-16 11:12:16 -0400270static inline struct net_device *
271dsa_slave_to_master(const struct net_device *dev)
272{
273 struct dsa_port *dp = dsa_slave_to_port(dev);
274
Vivien Didelotf8b8b1c2017-10-16 11:12:18 -0400275 return dp->cpu_dp->master;
Vivien Didelotd0006b02017-10-16 11:12:16 -0400276}
277
/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 * Returns the (possibly modified) skb, or NULL if skb_vlan_untag() consumed
 * and freed it on error.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);
	struct net_device *br = dsa_port_bridge_dev_get(dp);
	struct net_device *dev = skb->dev;
	struct net_device *upper_dev;
	u16 vid, pvid, proto;
	int err;

	/* Not bridged, or the bridge untags by itself (vlan_filtering=1):
	 * nothing to do.
	 */
	if (!br || br_vlan_enabled(br))
		return skb;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return skb;

	/* Move VLAN tag from data to hwaccel */
	if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
		skb = skb_vlan_untag(skb);
		if (!skb)
			return NULL;
	}

	/* Frame was not VLAN-tagged at all */
	if (!skb_vlan_tag_present(skb))
		return skb;

	vid = skb_vlan_tag_get_id(skb);

	/* We already run under an RCU read-side critical section since
	 * we are called from netif_receive_skb_list_internal().
	 */
	err = br_vlan_get_pvid_rcu(dev, &pvid);
	if (err)
		return skb;

	/* Only pvid-tagged frames are candidates for untagging */
	if (vid != pvid)
		return skb;

	/* The sad part about attempting to untag from DSA is that we
	 * don't know, unless we check, if the skb will end up in
	 * the bridge's data path - br_allowed_ingress() - or not.
	 * For example, there might be an 8021q upper for the
	 * default_pvid of the bridge, which will steal VLAN-tagged traffic
	 * from the bridge's data path. This is a configuration that DSA
	 * supports because vlan_filtering is 0. In that case, we should
	 * definitely keep the tag, to make sure it keeps working.
	 */
	upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
	if (upper_dev)
		return skb;

	__vlan_hwaccel_clear_tag(skb);

	return skb;
}
336
/* For switches without hardware support for DSA tagging to be able
 * to support termination through the bridge.
 *
 * Returns the first user netdevice that (a) is a bridge member, (b) is in a
 * learning/forwarding STP state, (c) shares its CPU port with @master, and
 * (d) has @vid in its bridge VLAN table; NULL if none qualifies.
 * Assumes an RCU read-side critical section (br_vlan_get_info_rcu) —
 * presumably called from the master's RX path; confirm at call sites.
 */
static inline struct net_device *
dsa_find_designated_bridge_port_by_vid(struct net_device *master, u16 vid)
{
	struct dsa_port *cpu_dp = master->dsa_ptr;
	struct dsa_switch_tree *dst = cpu_dp->dst;
	struct bridge_vlan_info vinfo;
	struct net_device *slave;
	struct dsa_port *dp;
	int err;

	list_for_each_entry(dp, &dst->ports, list) {
		/* Only user ports have slave netdevices */
		if (dp->type != DSA_PORT_TYPE_USER)
			continue;

		/* Must be offloading a bridge */
		if (!dp->bridge)
			continue;

		/* Ports that neither learn nor forward cannot terminate */
		if (dp->stp_state != BR_STATE_LEARNING &&
		    dp->stp_state != BR_STATE_FORWARDING)
			continue;

		/* Since the bridge might learn this packet, keep the CPU port
		 * affinity with the port that will be used for the reply on
		 * xmit.
		 */
		if (dp->cpu_dp != cpu_dp)
			continue;

		slave = dp->slave;

		/* VID must be present in the port's bridge VLAN table */
		err = br_vlan_get_info_rcu(slave, vid, &vinfo);
		if (err)
			continue;

		return slave;
	}

	return NULL;
}
379
Vladimir Olteanbea79072021-07-29 17:56:00 +0300380/* If the ingress port offloads the bridge, we mark the frame as autonomously
381 * forwarded by hardware, so the software bridge doesn't forward in twice, back
382 * to us, because we already did. However, if we're in fallback mode and we do
Vladimir Olteand3eed0e2021-12-06 18:57:56 +0200383 * software bridging, we are not offloading it, therefore the dp->bridge
Vladimir Olteanbea79072021-07-29 17:56:00 +0300384 * pointer is not populated, and flooding needs to be done by software (we are
385 * effectively operating in standalone ports mode).
386 */
387static inline void dsa_default_offload_fwd_mark(struct sk_buff *skb)
388{
389 struct dsa_port *dp = dsa_slave_to_port(skb->dev);
390
Vladimir Olteand3eed0e2021-12-06 18:57:56 +0200391 skb->offload_fwd_mark = !!(dp->bridge);
Vladimir Olteanbea79072021-07-29 17:56:00 +0300392}
393
/* Helper for removing DSA header tags from packets in the RX path.
 * Must not be called before skb_pull(len).
 * Slides the two MAC addresses forward by @len bytes over the DSA header,
 * reconstituting a standard Ethernet header at skb->data - ETH_HLEN.
 *                                                                 skb->data
 *                                                                         |
 *                                                                         v
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 *                                                 |               |
 * <----- len ----->                               <----- len ----->
 *                 |
 *       >>>>>>>   v
 *       >>>>>>>   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 *       >>>>>>>   +-----------------------+-----------------------+-------+
 *       >>>>>>>   |    Destination MAC    |      Source MAC       | EType |
 *                 +-----------------------+-----------------------+-------+
 *                 ^
 *                 |
 *                 skb->data
 */
static inline void dsa_strip_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - len, 2 * ETH_ALEN);
}
419
/* Helper for creating space for DSA header tags in TX path packets.
 * Must not be called before skb_push(len).
 * Slides the two MAC addresses back by @len bytes, opening a gap between
 * the source MAC and the EtherType for the tagger to fill in.
 *
 * Before:
 *
 *       <<<<<<<   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * ^     <<<<<<<   +-----------------------+-----------------------+-------+
 * |     <<<<<<<   |    Destination MAC    |      Source MAC       | EType |
 * |               +-----------------------+-----------------------+-------+
 * <----- len ----->
 * |
 * |
 * skb->data
 *
 * After:
 *
 * |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |   |
 * +-----------------------+-----------------------+---------------+-------+
 * |    Destination MAC    |      Source MAC       |  DSA header   | EType |
 * +-----------------------+-----------------------+---------------+-------+
 * ^                                               |               |
 * |                                               <----- len ----->
 * skb->data
 */
static inline void dsa_alloc_etype_header(struct sk_buff *skb, int len)
{
	memmove(skb->data, skb->data + len, 2 * ETH_ALEN);
}
448
/* On RX, eth_type_trans() on the DSA master pulls ETH_HLEN bytes starting from
 * skb_mac_header(skb), which leaves skb->data pointing at the first byte after
 * what the DSA master perceives as the EtherType (the beginning of the L3
 * protocol). Since DSA EtherType header taggers treat the EtherType as part of
 * the DSA tag itself, and the EtherType is 2 bytes in length, the DSA header
 * is located 2 bytes behind skb->data. Note that EtherType in this context
 * means the first 2 bytes of the DSA header, not the encapsulated EtherType
 * that will become visible after the DSA header is stripped.
 */
static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb)
{
	return skb->data - 2;
}
462
/* On TX, skb->data points to skb_mac_header(skb), which means that EtherType
 * header taggers start exactly where the EtherType is (the EtherType is
 * treated as part of the DSA header). The tag therefore begins right after
 * the two 6-byte MAC addresses.
 */
static inline void *dsa_etype_header_pos_tx(struct sk_buff *skb)
{
	return skb->data + 2 * ETH_ALEN;
}
471
/* switch.c */
int dsa_switch_register_notifier(struct dsa_switch *ds);
void dsa_switch_unregister_notifier(struct dsa_switch *ds);

/* dsa2.c */
void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
/* Deliver a cross-chip event to one tree, or to all trees (broadcast) */
int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
int dsa_broadcast(unsigned long e, void *v);
int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
			      struct net_device *master,
			      const struct dsa_device_ops *tag_ops,
			      const struct dsa_device_ops *old_tag_ops);
/* Reference-counted allocation of a bridge number, bounded by @max */
unsigned int dsa_bridge_num_get(const struct net_device *bridge_dev, int max);
void dsa_bridge_num_put(const struct net_device *bridge_dev,
			unsigned int bridge_num);
struct dsa_bridge *dsa_tree_bridge_find(struct dsa_switch_tree *dst,
					const struct net_device *br);
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100490
/* tag_8021q.c */
int dsa_tag_8021q_bridge_join(struct dsa_switch *ds,
			      struct dsa_notifier_bridge_info *info);
int dsa_tag_8021q_bridge_leave(struct dsa_switch *ds,
			       struct dsa_notifier_bridge_info *info);
int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds,
				  struct dsa_notifier_tag_8021q_vlan_info *info);
int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds,
				  struct dsa_notifier_tag_8021q_vlan_info *info);

/* List of all switch trees in the system; defined in dsa2.c */
extern struct list_head dsa_tree_list;
502
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000503#endif