blob: 2eeaa42f2e083aa1287cd34ba2fbd582e0cb6d88 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001/* SPDX-License-Identifier: GPL-2.0-or-later */
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00002/*
3 * net/dsa/dsa_priv.h - Hardware switch handling
Lennert Buytenheke84665c2009-03-20 09:52:09 +00004 * Copyright (c) 2008-2009 Marvell Semiconductor
Lennert Buytenhek91da11f2008-10-07 13:44:02 +00005 */
6
7#ifndef __DSA_PRIV_H
8#define __DSA_PRIV_H
9
Vladimir Oltean412a1522020-09-23 14:40:37 -070010#include <linux/if_bridge.h>
Lennert Buytenhek91da11f2008-10-07 13:44:02 +000011#include <linux/phy.h>
Alexander Duyck50753142014-09-15 13:00:19 -040012#include <linux/netdevice.h>
Florian Fainelli04ff53f2015-07-31 11:42:57 -070013#include <linux/netpoll.h>
Vivien Didelotea5dd342017-05-17 15:46:03 -040014#include <net/dsa.h>
Alexander Lobakine131a562020-04-21 16:41:08 +030015#include <net/gro_cells.h>
Alexander Duyck50753142014-09-15 13:00:19 -040016
/* Cross-chip notifier event IDs, delivered to every switch in a tree via
 * dsa_tree_notify() / dsa_broadcast() (declared below). Values are
 * positional — do not reorder.
 */
enum {
	DSA_NOTIFIER_AGEING_TIME,
	DSA_NOTIFIER_BRIDGE_JOIN,
	DSA_NOTIFIER_BRIDGE_LEAVE,
	DSA_NOTIFIER_FDB_ADD,
	DSA_NOTIFIER_FDB_DEL,
	DSA_NOTIFIER_HSR_JOIN,
	DSA_NOTIFIER_HSR_LEAVE,
	DSA_NOTIFIER_LAG_CHANGE,
	DSA_NOTIFIER_LAG_JOIN,
	DSA_NOTIFIER_LAG_LEAVE,
	DSA_NOTIFIER_MDB_ADD,
	DSA_NOTIFIER_MDB_DEL,
	DSA_NOTIFIER_VLAN_ADD,
	DSA_NOTIFIER_VLAN_DEL,
	DSA_NOTIFIER_MTU,
	DSA_NOTIFIER_TAG_PROTO,
	DSA_NOTIFIER_MRP_ADD,
	DSA_NOTIFIER_MRP_DEL,
	DSA_NOTIFIER_MRP_ADD_RING_ROLE,
	DSA_NOTIFIER_MRP_DEL_RING_ROLE,
};
39
/* DSA_NOTIFIER_AGEING_TIME */
struct dsa_notifier_ageing_time_info {
	unsigned int ageing_time;	/* FDB ageing time to apply */
};
44
/* DSA_NOTIFIER_BRIDGE_* */
struct dsa_notifier_bridge_info {
	struct net_device *br;		/* bridge being joined/left */
	int tree_index;			/* switch tree of the affected port */
	int sw_index;			/* switch index within that tree */
	int port;			/* port index on that switch */
};
52
/* DSA_NOTIFIER_FDB_* */
struct dsa_notifier_fdb_info {
	int sw_index;			/* switch index within the tree */
	int port;			/* port index on that switch */
	const unsigned char *addr;	/* MAC address of the FDB entry */
	u16 vid;			/* VLAN ID of the FDB entry */
};
60
/* DSA_NOTIFIER_MDB_* */
struct dsa_notifier_mdb_info {
	const struct switchdev_obj_port_mdb *mdb;	/* multicast DB entry */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index on that switch */
};
67
/* DSA_NOTIFIER_LAG_* */
struct dsa_notifier_lag_info {
	struct net_device *lag;		/* LAG netdev being joined/left/changed */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index on that switch */

	/* Upper-device info from the netdev notifier; only meaningful for
	 * DSA_NOTIFIER_LAG_JOIN.
	 */
	struct netdev_lag_upper_info *info;
};
76
/* DSA_NOTIFIER_VLAN_* */
struct dsa_notifier_vlan_info {
	const struct switchdev_obj_port_vlan *vlan;	/* VLAN to add/delete */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index on that switch */
	struct netlink_ext_ack *extack;	/* for reporting errors to user space */
};
84
/* DSA_NOTIFIER_MTU */
struct dsa_notifier_mtu_info {
	/* Whether the MTU change should also be applied to upstream
	 * (CPU/DSA link) ports — see dsa_port_mtu_change().
	 */
	bool propagate_upstream;
	int sw_index;			/* switch index within the tree */
	int port;			/* port index on that switch */
	int mtu;			/* new MTU value */
};
92
/* DSA_NOTIFIER_TAG_PROTO_* */
struct dsa_notifier_tag_proto_info {
	const struct dsa_device_ops *tag_ops;	/* tagger to switch to */
};
97
/* DSA_NOTIFIER_MRP_* */
struct dsa_notifier_mrp_info {
	const struct switchdev_obj_mrp *mrp;	/* MRP instance to add/delete */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index on that switch */
};
104
/* DSA_NOTIFIER_MRP_*_RING_ROLE */
struct dsa_notifier_mrp_ring_role_info {
	const struct switchdev_obj_ring_role_mrp *mrp;	/* ring role object */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index on that switch */
};
111
/* Deferred-work context used to handle a switchdev notifier event in
 * process context (scheduled via dsa_schedule_work()).
 */
struct dsa_switchdev_event_work {
	struct dsa_switch *ds;		/* switch the event targets */
	int port;			/* port index on that switch */
	struct work_struct work;
	unsigned long event;		/* switchdev notifier event type */
	/* Specific for SWITCHDEV_FDB_ADD_TO_DEVICE and
	 * SWITCHDEV_FDB_DEL_TO_DEVICE
	 */
	unsigned char addr[ETH_ALEN];
	u16 vid;
};
123
/* DSA_NOTIFIER_HSR_* */
struct dsa_notifier_hsr_info {
	struct net_device *hsr;		/* HSR device being joined/left */
	int sw_index;			/* switch index within the tree */
	int port;			/* port index on that switch */
};
130
/* Private data attached (via netdev_priv()) to every DSA slave netdev. */
struct dsa_slave_priv {
	/* Copy of CPU port xmit for faster access in slave transmit hot path */
	struct sk_buff * (*xmit)(struct sk_buff *skb,
				 struct net_device *dev);

	/* GRO cells for receive-side aggregation */
	struct gro_cells gcells;

	/* DSA port data, such as switch, port index, etc. */
	struct dsa_port *dp;

#ifdef CONFIG_NET_POLL_CONTROLLER
	struct netpoll *netpoll;
#endif

	/* TC context */
	struct list_head mall_tc_list;
};
148
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000149/* dsa.c */
Andrew Lunnc39e2a12019-04-28 19:37:18 +0200150const struct dsa_device_ops *dsa_tag_driver_get(int tag_protocol);
Andrew Lunn4dad81e2019-04-28 19:37:19 +0200151void dsa_tag_driver_put(const struct dsa_device_ops *ops);
Vladimir Oltean53da0eb2021-01-29 03:00:06 +0200152const struct dsa_device_ops *dsa_find_tagger_by_name(const char *buf);
Andrew Lunnc39e2a12019-04-28 19:37:18 +0200153
Arkadi Sharshevskyc9eb3e02017-08-06 16:15:42 +0300154bool dsa_schedule_work(struct work_struct *work);
Florian Fainelli98cdb482018-09-07 11:09:02 -0700155const char *dsa_tag_protocol_to_str(const struct dsa_device_ops *ops);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000156
Vivien Didelotf2f23562017-09-19 11:57:00 -0400157/* master.c */
Vivien Didelot17a22fc2017-11-06 16:11:45 -0500158int dsa_master_setup(struct net_device *dev, struct dsa_port *cpu_dp);
159void dsa_master_teardown(struct net_device *dev);
Vivien Didelotf2f23562017-09-19 11:57:00 -0400160
Vivien Didelot2231c432017-10-16 11:12:17 -0400161static inline struct net_device *dsa_master_find_slave(struct net_device *dev,
162 int device, int port)
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400163{
Vivien Didelot2f657a62017-09-29 17:19:20 -0400164 struct dsa_port *cpu_dp = dev->dsa_ptr;
165 struct dsa_switch_tree *dst = cpu_dp->dst;
Vivien Didelot7b9a2f42019-10-21 16:51:18 -0400166 struct dsa_port *dp;
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400167
Vivien Didelot7b9a2f42019-10-21 16:51:18 -0400168 list_for_each_entry(dp, &dst->ports, list)
169 if (dp->ds->index == device && dp->index == port &&
170 dp->type == DSA_PORT_TYPE_USER)
171 return dp->slave;
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400172
Vivien Didelot7b9a2f42019-10-21 16:51:18 -0400173 return NULL;
Vivien Didelot3775b1b2017-09-29 17:19:15 -0400174}
175
Vivien Didelota40c1752017-05-19 17:00:44 -0400176/* port.c */
Vladimir Oltean53da0eb2021-01-29 03:00:06 +0200177void dsa_port_set_tag_protocol(struct dsa_port *cpu_dp,
178 const struct dsa_device_ops *tag_ops);
Vladimir Olteanbae33f22021-01-09 02:01:50 +0200179int dsa_port_set_state(struct dsa_port *dp, u8 state);
Russell King8640f8d2020-03-03 15:01:46 +0000180int dsa_port_enable_rt(struct dsa_port *dp, struct phy_device *phy);
Vivien Didelotfb8a6a22017-09-22 19:01:56 -0400181int dsa_port_enable(struct dsa_port *dp, struct phy_device *phy);
Russell King8640f8d2020-03-03 15:01:46 +0000182void dsa_port_disable_rt(struct dsa_port *dp);
Andrew Lunn75104db2019-02-24 20:44:43 +0100183void dsa_port_disable(struct dsa_port *dp);
Vivien Didelotcfbed322017-05-19 17:00:45 -0400184int dsa_port_bridge_join(struct dsa_port *dp, struct net_device *br);
185void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br);
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100186int dsa_port_lag_change(struct dsa_port *dp,
187 struct netdev_lag_lower_state_info *linfo);
188int dsa_port_lag_join(struct dsa_port *dp, struct net_device *lag_dev,
189 struct netdev_lag_upper_info *uinfo);
190void dsa_port_lag_leave(struct dsa_port *dp, struct net_device *lag_dev);
Vladimir Oltean89153ed2021-02-13 22:43:19 +0200191int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
192 struct netlink_ext_ack *extack);
Russell King54a0ed02020-05-12 20:20:25 +0300193bool dsa_port_skip_vlan_configuration(struct dsa_port *dp);
Vladimir Olteanbae33f22021-01-09 02:01:50 +0200194int dsa_port_ageing_time(struct dsa_port *dp, clock_t ageing_clock);
Vladimir Olteanbfcb8132020-03-27 21:55:42 +0200195int dsa_port_mtu_change(struct dsa_port *dp, int new_mtu,
196 bool propagate_upstream);
Arkadi Sharshevsky2acf4e62017-08-06 16:15:41 +0300197int dsa_port_fdb_add(struct dsa_port *dp, const unsigned char *addr,
198 u16 vid);
199int dsa_port_fdb_del(struct dsa_port *dp, const unsigned char *addr,
200 u16 vid);
Vivien Didelotde40fc52017-09-20 19:32:14 -0400201int dsa_port_fdb_dump(struct dsa_port *dp, dsa_fdb_dump_cb_t *cb, void *data);
Andrew Lunnbb9f603172017-11-09 23:11:01 +0100202int dsa_port_mdb_add(const struct dsa_port *dp,
Vladimir Olteanffb68fc2021-01-09 02:01:48 +0200203 const struct switchdev_obj_port_mdb *mdb);
Andrew Lunnbb9f603172017-11-09 23:11:01 +0100204int dsa_port_mdb_del(const struct dsa_port *dp,
Vivien Didelot3a9afea2017-05-19 17:00:49 -0400205 const struct switchdev_obj_port_mdb *mdb);
Vladimir Olteane18f4c12021-02-12 17:15:55 +0200206int dsa_port_pre_bridge_flags(const struct dsa_port *dp,
Vladimir Olteana8b659e2021-02-12 17:15:56 +0200207 struct switchdev_brport_flags flags,
208 struct netlink_ext_ack *extack);
Vladimir Olteane18f4c12021-02-12 17:15:55 +0200209int dsa_port_bridge_flags(const struct dsa_port *dp,
Vladimir Olteana8b659e2021-02-12 17:15:56 +0200210 struct switchdev_brport_flags flags,
211 struct netlink_ext_ack *extack);
212int dsa_port_mrouter(struct dsa_port *dp, bool mrouter,
213 struct netlink_ext_ack *extack);
Vivien Didelot076e7132017-05-19 17:00:50 -0400214int dsa_port_vlan_add(struct dsa_port *dp,
Vladimir Oltean31046a52021-02-13 22:43:18 +0200215 const struct switchdev_obj_port_vlan *vlan,
216 struct netlink_ext_ack *extack);
Vivien Didelot076e7132017-05-19 17:00:50 -0400217int dsa_port_vlan_del(struct dsa_port *dp,
218 const struct switchdev_obj_port_vlan *vlan);
Horatiu Vulturc595c432021-02-16 22:42:04 +0100219int dsa_port_mrp_add(const struct dsa_port *dp,
220 const struct switchdev_obj_mrp *mrp);
221int dsa_port_mrp_del(const struct dsa_port *dp,
222 const struct switchdev_obj_mrp *mrp);
223int dsa_port_mrp_add_ring_role(const struct dsa_port *dp,
224 const struct switchdev_obj_ring_role_mrp *mrp);
225int dsa_port_mrp_del_ring_role(const struct dsa_port *dp,
226 const struct switchdev_obj_ring_role_mrp *mrp);
Sebastian Reichel33615362018-01-23 16:03:46 +0100227int dsa_port_link_register_of(struct dsa_port *dp);
228void dsa_port_link_unregister_of(struct dsa_port *dp);
George McCollister18596f52021-02-09 19:02:12 -0600229int dsa_port_hsr_join(struct dsa_port *dp, struct net_device *hsr);
230void dsa_port_hsr_leave(struct dsa_port *dp, struct net_device *hsr);
Ioana Ciornei77373d42019-05-28 20:38:15 +0300231extern const struct phylink_mac_ops dsa_port_phylink_mac_ops;
Vivien Didelot57ab1ca2017-10-26 10:50:07 -0400232
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100233static inline bool dsa_port_offloads_netdev(struct dsa_port *dp,
234 struct net_device *dev)
235{
236 /* Switchdev offloading can be configured on: */
237
238 if (dev == dp->slave)
Vladimir Oltean99b82022021-02-03 01:31:09 +0200239 /* DSA ports directly connected to a bridge, and event
240 * was emitted for the ports themselves.
241 */
242 return true;
243
244 if (dp->bridge_dev == dev)
245 /* DSA ports connected to a bridge, and event was emitted
246 * for the bridge.
247 */
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100248 return true;
249
250 if (dp->lag_dev == dev)
251 /* DSA ports connected to a bridge via a LAG */
252 return true;
253
254 return false;
255}
256
Vladimir Olteana324d3d2021-02-06 00:02:20 +0200257/* Returns true if any port of this tree offloads the given net_device */
258static inline bool dsa_tree_offloads_netdev(struct dsa_switch_tree *dst,
259 struct net_device *dev)
260{
261 struct dsa_port *dp;
262
263 list_for_each_entry(dp, &dst->ports, list)
264 if (dsa_port_offloads_netdev(dp, dev))
265 return true;
266
267 return false;
268}
269
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000270/* slave.c */
Alexander Duyck50753142014-09-15 13:00:19 -0400271extern const struct dsa_device_ops notag_netdev_ops;
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000272void dsa_slave_mii_bus_init(struct dsa_switch *ds);
Vivien Didelot951259aa2017-10-27 15:55:19 -0400273int dsa_slave_create(struct dsa_port *dp);
Neil Armstrongcda5c152015-12-07 13:57:35 +0100274void dsa_slave_destroy(struct net_device *slave_dev);
Florian Fainelli24462542014-09-18 17:31:22 -0700275int dsa_slave_suspend(struct net_device *slave_dev);
276int dsa_slave_resume(struct net_device *slave_dev);
Vivien Didelot88e4f0c2017-02-03 13:20:16 -0500277int dsa_slave_register_notifier(void);
278void dsa_slave_unregister_notifier(void);
Vladimir Oltean53da0eb2021-01-29 03:00:06 +0200279void dsa_slave_setup_tagger(struct net_device *slave);
280int dsa_slave_change_mtu(struct net_device *dev, int new_mtu);
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000281
Vivien Didelotd9450972017-10-16 11:12:15 -0400282static inline struct dsa_port *dsa_slave_to_port(const struct net_device *dev)
283{
284 struct dsa_slave_priv *p = netdev_priv(dev);
285
286 return p->dp;
287}
288
Vivien Didelotd0006b02017-10-16 11:12:16 -0400289static inline struct net_device *
290dsa_slave_to_master(const struct net_device *dev)
291{
292 struct dsa_port *dp = dsa_slave_to_port(dev);
293
Vivien Didelotf8b8b1c2017-10-16 11:12:18 -0400294 return dp->cpu_dp->master;
Vivien Didelotd0006b02017-10-16 11:12:16 -0400295}
296
/* If under a bridge with vlan_filtering=0, make sure to send pvid-tagged
 * frames as untagged, since the bridge will not untag them.
 *
 * Called on the RX path; may consume @skb and return NULL if untagging
 * fails. Otherwise returns @skb, with the hwaccel VLAN tag cleared when
 * the tag matches the bridge port's pvid and nothing else claims it.
 */
static inline struct sk_buff *dsa_untag_bridge_pvid(struct sk_buff *skb)
{
	struct dsa_port *dp = dsa_slave_to_port(skb->dev);
	struct net_device *br = dp->bridge_dev;
	struct net_device *dev = skb->dev;
	struct net_device *upper_dev;
	u16 vid, pvid, proto;
	int err;

	/* Only act for ports under a bridge with VLAN filtering disabled */
	if (!br || br_vlan_enabled(br))
		return skb;

	err = br_vlan_get_proto(br, &proto);
	if (err)
		return skb;

	/* Move VLAN tag from data to hwaccel */
	if (!skb_vlan_tag_present(skb) && skb->protocol == htons(proto)) {
		skb = skb_vlan_untag(skb);
		if (!skb)
			return NULL;
	}

	/* Frame is not VLAN-tagged at all: nothing to untag */
	if (!skb_vlan_tag_present(skb))
		return skb;

	vid = skb_vlan_tag_get_id(skb);

	/* We already run under an RCU read-side critical section since
	 * we are called from netif_receive_skb_list_internal().
	 */
	err = br_vlan_get_pvid_rcu(dev, &pvid);
	if (err)
		return skb;

	/* Only the pvid should be delivered untagged */
	if (vid != pvid)
		return skb;

	/* The sad part about attempting to untag from DSA is that we
	 * don't know, unless we check, if the skb will end up in
	 * the bridge's data path - br_allowed_ingress() - or not.
	 * For example, there might be an 8021q upper for the
	 * default_pvid of the bridge, which will steal VLAN-tagged traffic
	 * from the bridge's data path. This is a configuration that DSA
	 * supports because vlan_filtering is 0. In that case, we should
	 * definitely keep the tag, to make sure it keeps working.
	 */
	upper_dev = __vlan_find_dev_deep_rcu(br, htons(proto), vid);
	if (upper_dev)
		return skb;

	__vlan_hwaccel_clear_tag(skb);

	return skb;
}
355
Vivien Didelotf515f192017-02-03 13:20:20 -0500356/* switch.c */
357int dsa_switch_register_notifier(struct dsa_switch *ds);
358void dsa_switch_unregister_notifier(struct dsa_switch *ds);
Vladimir Olteanbff33f72020-03-27 21:55:43 +0200359
360/* dsa2.c */
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100361void dsa_lag_map(struct dsa_switch_tree *dst, struct net_device *lag);
362void dsa_lag_unmap(struct dsa_switch_tree *dst, struct net_device *lag);
Vladimir Oltean886f8e22021-01-29 03:00:04 +0200363int dsa_tree_notify(struct dsa_switch_tree *dst, unsigned long e, void *v);
364int dsa_broadcast(unsigned long e, void *v);
Vladimir Oltean53da0eb2021-01-29 03:00:06 +0200365int dsa_tree_change_tag_proto(struct dsa_switch_tree *dst,
366 struct net_device *master,
367 const struct dsa_device_ops *tag_ops,
368 const struct dsa_device_ops *old_tag_ops);
Tobias Waldekranz058102a2021-01-13 09:42:53 +0100369
Vladimir Olteanbff33f72020-03-27 21:55:43 +0200370extern struct list_head dsa_tree_list;
371
Lennert Buytenhek91da11f2008-10-07 13:44:02 +0000372#endif