blob: 0e3dbc5f3c34f83203ffafcb944a89fed8043b25 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * Device handling code
4 * Linux ethernet bridge
5 *
6 * Authors:
7 * Lennert Buytenhek <buytenh@gnu.org>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 */
9
10#include <linux/kernel.h>
11#include <linux/netdevice.h>
WANG Congc06ee962010-05-06 00:48:24 -070012#include <linux/netpoll.h>
Stephen Hemminger4505a3e2005-12-21 18:51:49 -080013#include <linux/etherdevice.h>
Stephen Hemmingeredb5e462005-12-21 19:00:58 -080014#include <linux/ethtool.h>
WANG Congc06ee962010-05-06 00:48:24 -070015#include <linux/list.h>
Bart De Schuymerea2d9b42010-04-15 12:14:51 +020016#include <linux/netfilter_bridge.h>
Stephen Hemminger4505a3e2005-12-21 18:51:49 -080017
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080018#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070019#include "br_private.h"
20
Vlad Yasevich161f65b2013-05-22 07:49:34 +000021#define COMMON_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA | \
22 NETIF_F_GSO_MASK | NETIF_F_HW_CSUM)
23
Pablo Neira Ayuso1a4ba642015-03-10 10:27:18 +010024const struct nf_br_ops __rcu *nf_br_ops __read_mostly;
25EXPORT_SYMBOL_GPL(nf_br_ops);
26
/* net device transmit always called with BH disabled */
netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_fdb_entry *dst;
	struct net_bridge_mdb_entry *mdst;
	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
	const struct nf_br_ops *nf_ops;
	u8 state = BR_STATE_FORWARDING;
	const unsigned char *dest;
	u16 vid = 0;

	rcu_read_lock();
	/* Give br_netfilter (if loaded) first shot at the frame; a non-zero
	 * return means the hook consumed the skb and we must not touch it.
	 */
	nf_ops = rcu_dereference(nf_br_ops);
	if (nf_ops && nf_ops->br_dev_xmit_hook(skb)) {
		rcu_read_unlock();
		return NETDEV_TX_OK;
	}

	/* Per-CPU TX counters, guarded by the u64_stats seqcount so 64-bit
	 * values read coherently on 32-bit hosts.
	 */
	u64_stats_update_begin(&brstats->syncp);
	brstats->tx_packets++;
	brstats->tx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	br_switchdev_frame_unmark(skb);
	BR_INPUT_SKB_CB(skb)->brdev = dev;
	BR_INPUT_SKB_CB(skb)->frag_max_size = 0;

	/* Strip the Ethernet header; forwarding helpers expect the skb to
	 * point at the payload with the MAC header position recorded.
	 */
	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);

	/* VLAN ingress policy of the bridge itself; frees/consumes the skb
	 * on rejection, hence the bare goto.
	 */
	if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid, &state))
		goto out;

	/* Optional ARP/ND proxy suppression for locally originated frames. */
	if (IS_ENABLED(CONFIG_INET) &&
	    (eth_hdr(skb)->h_proto == htons(ETH_P_ARP) ||
	     eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
	    br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
		br_do_proxy_suppress_arp(skb, br, vid, NULL);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
		struct nd_msg *msg, _msg;

		msg = br_is_nd_neigh_msg(skb, &_msg);
		if (msg)
			br_do_suppress_nd(skb, br, vid, NULL, msg);
	}

	/* Read the destination only now: the suppression helpers above may
	 * have reallocated skb data, invalidating an earlier eth_hdr().
	 */
	dest = eth_hdr(skb)->h_dest;
	if (is_broadcast_ether_addr(dest)) {
		br_flood(br, skb, BR_PKT_BROADCAST, false, true);
	} else if (is_multicast_ether_addr(dest)) {
		/* Under netpoll, snooping paths are unsafe — just flood. */
		if (unlikely(netpoll_tx_running(dev))) {
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
			goto out;
		}
		/* IGMP/MLD control traffic is consumed by the snooper. */
		if (br_multicast_rcv(br, NULL, skb, vid)) {
			kfree_skb(skb);
			goto out;
		}

		/* Deliver via the MDB when a querier exists, else flood. */
		mdst = br_mdb_get(br, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(br, eth_hdr(skb)))
			br_multicast_flood(mdst, skb, false, true);
		else
			br_flood(br, skb, BR_PKT_MULTICAST, false, true);
	} else if ((dst = br_fdb_find_rcu(br, dest, vid)) != NULL) {
		/* Known unicast: forward out the learned port only. */
		br_forward(dst->dst, skb, false, true);
	} else {
		/* Unknown unicast: flood to all forwarding ports. */
		br_flood(br, skb, BR_PKT_UNICAST, false, true);
	}
out:
	rcu_read_unlock();
	return NETDEV_TX_OK;
}
107
stephen hemmingerbb900b22011-04-04 14:03:32 +0000108static int br_dev_init(struct net_device *dev)
109{
110 struct net_bridge *br = netdev_priv(dev);
Vlad Yasevich5be5a2d2014-10-03 11:29:18 -0400111 int err;
stephen hemmingerbb900b22011-04-04 14:03:32 +0000112
WANG Cong1c213bd2014-02-13 11:46:28 -0800113 br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
stephen hemmingerbb900b22011-04-04 14:03:32 +0000114 if (!br->stats)
115 return -ENOMEM;
116
Nikolay Aleksandroveb793582017-12-12 16:02:50 +0200117 err = br_fdb_hash_init(br);
118 if (err) {
119 free_percpu(br->stats);
120 return err;
121 }
122
Nikolay Aleksandrov19e3a9c2018-12-05 15:14:24 +0200123 err = br_mdb_hash_init(br);
124 if (err) {
125 free_percpu(br->stats);
126 br_fdb_hash_fini(br);
127 return err;
128 }
129
Vlad Yasevich5be5a2d2014-10-03 11:29:18 -0400130 err = br_vlan_init(br);
Nikolay Aleksandrov1080ab92016-06-28 16:57:06 +0200131 if (err) {
Vlad Yasevich5be5a2d2014-10-03 11:29:18 -0400132 free_percpu(br->stats);
Nikolay Aleksandrov19e3a9c2018-12-05 15:14:24 +0200133 br_mdb_hash_fini(br);
Nikolay Aleksandroveb793582017-12-12 16:02:50 +0200134 br_fdb_hash_fini(br);
Nikolay Aleksandrov1080ab92016-06-28 16:57:06 +0200135 return err;
136 }
137
138 err = br_multicast_init_stats(br);
139 if (err) {
140 free_percpu(br->stats);
141 br_vlan_flush(br);
Nikolay Aleksandrov19e3a9c2018-12-05 15:14:24 +0200142 br_mdb_hash_fini(br);
Nikolay Aleksandroveb793582017-12-12 16:02:50 +0200143 br_fdb_hash_fini(br);
Nikolay Aleksandrov1080ab92016-06-28 16:57:06 +0200144 }
Vlad Yasevich5be5a2d2014-10-03 11:29:18 -0400145
146 return err;
stephen hemmingerbb900b22011-04-04 14:03:32 +0000147}
148
/* ndo_uninit: release everything br_dev_init() and runtime operation
 * acquired, in reverse order of setup.
 */
static void br_dev_uninit(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	br_multicast_dev_del(br);
	br_multicast_uninit_stats(br);
	br_vlan_flush(br);
	br_mdb_hash_fini(br);
	br_fdb_hash_fini(br);
	/* Last: nothing above may touch the per-CPU stats afterwards. */
	free_percpu(br->stats);
}
160
/* ndo_open: bring the bridge master up — refresh offload features,
 * start the TX queue, then enable STP and multicast processing.
 */
static int br_dev_open(struct net_device *dev)
{
	struct net_bridge *bridge = netdev_priv(dev);

	netdev_update_features(dev);
	netif_start_queue(dev);
	br_stp_enable_bridge(bridge);
	br_multicast_open(bridge);

	return 0;
}
172
/* ndo_set_rx_mode stub: intentionally empty — the bridge device itself
 * has no hardware filter to program; presumably the ports manage their
 * own RX mode (NOTE(review): confirm against br_manage_promisc usage).
 */
static void br_dev_set_multicast_list(struct net_device *dev)
{
}
176
Vlad Yasevich2796d0c2014-05-16 09:59:20 -0400177static void br_dev_change_rx_flags(struct net_device *dev, int change)
178{
179 if (change & IFF_PROMISC)
180 br_manage_promisc(netdev_priv(dev));
181}
182
/* ndo_stop: disable STP and multicast first, then halt the TX queue. */
static int br_dev_stop(struct net_device *dev)
{
	struct net_bridge *bridge = netdev_priv(dev);

	br_stp_disable_bridge(bridge);
	br_multicast_stop(bridge);

	netif_stop_queue(dev);

	return 0;
}
194
stephen hemmingerbc1f4472017-01-06 19:12:52 -0800195static void br_get_stats64(struct net_device *dev,
196 struct rtnl_link_stats64 *stats)
stephen hemminger14bb4782010-03-02 13:32:09 +0000197{
198 struct net_bridge *br = netdev_priv(dev);
Li RongQing8f849852014-01-04 13:57:59 +0800199 struct pcpu_sw_netstats tmp, sum = { 0 };
stephen hemminger14bb4782010-03-02 13:32:09 +0000200 unsigned int cpu;
201
202 for_each_possible_cpu(cpu) {
Eric Dumazet406818f2010-06-23 13:00:48 -0700203 unsigned int start;
Li RongQing8f849852014-01-04 13:57:59 +0800204 const struct pcpu_sw_netstats *bstats
stephen hemminger14bb4782010-03-02 13:32:09 +0000205 = per_cpu_ptr(br->stats, cpu);
Eric Dumazet406818f2010-06-23 13:00:48 -0700206 do {
Eric W. Biederman57a77442014-03-13 21:26:42 -0700207 start = u64_stats_fetch_begin_irq(&bstats->syncp);
Eric Dumazet406818f2010-06-23 13:00:48 -0700208 memcpy(&tmp, bstats, sizeof(tmp));
Eric W. Biederman57a77442014-03-13 21:26:42 -0700209 } while (u64_stats_fetch_retry_irq(&bstats->syncp, start));
Eric Dumazet406818f2010-06-23 13:00:48 -0700210 sum.tx_bytes += tmp.tx_bytes;
211 sum.tx_packets += tmp.tx_packets;
212 sum.rx_bytes += tmp.rx_bytes;
213 sum.rx_packets += tmp.rx_packets;
stephen hemminger14bb4782010-03-02 13:32:09 +0000214 }
215
216 stats->tx_bytes = sum.tx_bytes;
217 stats->tx_packets = sum.tx_packets;
218 stats->rx_bytes = sum.rx_bytes;
219 stats->rx_packets = sum.rx_packets;
stephen hemminger14bb4782010-03-02 13:32:09 +0000220}
221
/* ndo_change_mtu: accept any MTU within dev->min_mtu..max_mtu (range is
 * enforced by the core before this is called) and record that the value
 * came from the user rather than from auto-adjustment.
 */
static int br_change_mtu(struct net_device *dev, int new_mtu)
{
	struct net_bridge *br = netdev_priv(dev);

	dev->mtu = new_mtu;

	/* this flag will be cleared if the MTU was automatically adjusted */
	br_opt_toggle(br, BROPT_MTU_SET_BY_USER, true);
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* remember the MTU in the rtable for PMTU */
	dst_metric_set(&br->fake_rtable.dst, RTAX_MTU, new_mtu);
#endif

	return 0;
}
237
Stephen Hemmingerffe1d492007-04-09 11:49:58 -0700238/* Allow setting mac address to any valid ethernet address. */
Stephen Hemminger4505a3e2005-12-21 18:51:49 -0800239static int br_set_mac_address(struct net_device *dev, void *p)
240{
241 struct net_bridge *br = netdev_priv(dev);
242 struct sockaddr *addr = p;
Stephen Hemmingerffe1d492007-04-09 11:49:58 -0700243
244 if (!is_valid_ether_addr(addr->sa_data))
Danny Kukawka7ca1e112012-02-21 02:07:52 +0000245 return -EADDRNOTAVAIL;
Stephen Hemminger4505a3e2005-12-21 18:51:49 -0800246
Nikolay Aleksandrovc4b4c422019-12-03 16:48:06 +0200247 /* dev_set_mac_addr() can be called by a master device on bridge's
248 * NETDEV_UNREGISTER, but since it's being destroyed do nothing
249 */
250 if (dev->reg_state != NETREG_REGISTERED)
251 return -EBUSY;
252
Stephen Hemminger4505a3e2005-12-21 18:51:49 -0800253 spin_lock_bh(&br->lock);
Joe Perches9a7b6ef92012-05-08 18:56:49 +0000254 if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
Toshiaki Makitaa3ebb7e2014-02-07 16:48:20 +0900255 /* Mac address will be changed in br_stp_change_bridge_id(). */
stephen hemminger43598812011-12-08 07:17:49 +0000256 br_stp_change_bridge_id(br, addr->sa_data);
257 }
Stephen Hemminger4505a3e2005-12-21 18:51:49 -0800258 spin_unlock_bh(&br->lock);
259
Stephen Hemmingerffe1d492007-04-09 11:49:58 -0700260 return 0;
Stephen Hemminger4505a3e2005-12-21 18:51:49 -0800261}
262
/* ethtool get_drvinfo: the bridge is pure software, so firmware version
 * and bus info are reported as "N/A".
 */
static void br_getinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, "bridge", sizeof(info->driver));
	strlcpy(info->version, BR_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}
270
Matthias Schiffer542575f2019-11-12 22:12:25 +0100271static int br_get_link_ksettings(struct net_device *dev,
272 struct ethtool_link_ksettings *cmd)
273{
274 struct net_bridge *br = netdev_priv(dev);
275 struct net_bridge_port *p;
276
277 cmd->base.duplex = DUPLEX_UNKNOWN;
278 cmd->base.port = PORT_OTHER;
279 cmd->base.speed = SPEED_UNKNOWN;
280
281 list_for_each_entry(p, &br->port_list, list) {
282 struct ethtool_link_ksettings ecmd;
283 struct net_device *pdev = p->dev;
284
285 if (!netif_running(pdev) || !netif_oper_up(pdev))
286 continue;
287
288 if (__ethtool_get_link_ksettings(pdev, &ecmd))
289 continue;
290
291 if (ecmd.base.speed == (__u32)SPEED_UNKNOWN)
292 continue;
293
294 if (cmd->base.speed == (__u32)SPEED_UNKNOWN ||
295 cmd->base.speed < ecmd.base.speed)
296 cmd->base.speed = ecmd.base.speed;
297 }
298
299 return 0;
300}
301
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000302static netdev_features_t br_fix_features(struct net_device *dev,
303 netdev_features_t features)
Stephen Hemmingeredb5e462005-12-21 19:00:58 -0800304{
305 struct net_bridge *br = netdev_priv(dev);
306
Michał Mirosławc4d27ef2011-04-22 06:31:16 +0000307 return br_features_recompute(br, features);
Jesse Gross361ff8a2010-10-20 13:56:08 +0000308}
309
WANG Congc06ee962010-05-06 00:48:24 -0700310#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller stub: intentionally empty — netpoll state lives on
 * the individual bridge ports, so there is nothing to poll at the bridge
 * level.
 */
static void br_poll_controller(struct net_device *br_dev)
{
}
314
Herbert Xu91d2c342010-06-10 16:12:50 +0000315static void br_netpoll_cleanup(struct net_device *dev)
WANG Congc06ee962010-05-06 00:48:24 -0700316{
stephen hemmingercfb478d2010-05-10 09:31:08 +0000317 struct net_bridge *br = netdev_priv(dev);
Amerigo Wang4e3828c2012-08-10 01:24:44 +0000318 struct net_bridge_port *p;
WANG Congc06ee962010-05-06 00:48:24 -0700319
Amerigo Wang4e3828c2012-08-10 01:24:44 +0000320 list_for_each_entry(p, &br->port_list, list)
Herbert Xu91d2c342010-06-10 16:12:50 +0000321 br_netpoll_disable(p);
WANG Congc06ee962010-05-06 00:48:24 -0700322}
323
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700324static int __br_netpoll_enable(struct net_bridge_port *p)
stephen hemmingercfb478d2010-05-10 09:31:08 +0000325{
Herbert Xu91d2c342010-06-10 16:12:50 +0000326 struct netpoll *np;
stephen hemminger93d8bf92013-07-24 11:51:41 -0700327 int err;
328
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700329 np = kzalloc(sizeof(*p->np), GFP_KERNEL);
Herbert Xu91d2c342010-06-10 16:12:50 +0000330 if (!np)
stephen hemminger93d8bf92013-07-24 11:51:41 -0700331 return -ENOMEM;
Herbert Xu91d2c342010-06-10 16:12:50 +0000332
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700333 err = __netpoll_setup(np, p->dev);
Herbert Xu91d2c342010-06-10 16:12:50 +0000334 if (err) {
335 kfree(np);
stephen hemminger93d8bf92013-07-24 11:51:41 -0700336 return err;
stephen hemmingercfb478d2010-05-10 09:31:08 +0000337 }
Herbert Xu91d2c342010-06-10 16:12:50 +0000338
339 p->np = np;
Herbert Xu91d2c342010-06-10 16:12:50 +0000340 return err;
341}
342
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700343int br_netpoll_enable(struct net_bridge_port *p)
Cong Wangdbe17302014-02-06 15:00:52 -0800344{
345 if (!p->br->dev->npinfo)
346 return 0;
347
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700348 return __br_netpoll_enable(p);
Cong Wangdbe17302014-02-06 15:00:52 -0800349}
350
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700351static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni)
Cong Wangdbe17302014-02-06 15:00:52 -0800352{
353 struct net_bridge *br = netdev_priv(dev);
354 struct net_bridge_port *p;
355 int err = 0;
356
357 list_for_each_entry(p, &br->port_list, list) {
358 if (!p->dev)
359 continue;
Eric W. Biedermana8779ec2014-03-27 15:36:38 -0700360 err = __br_netpoll_enable(p);
Cong Wangdbe17302014-02-06 15:00:52 -0800361 if (err)
362 goto fail;
363 }
364
365out:
366 return err;
367
368fail:
369 br_netpoll_cleanup(dev);
370 goto out;
371}
372
/* Detach and free the netpoll instance of port @p, if any. */
void br_netpoll_disable(struct net_bridge_port *p)
{
	struct netpoll *np = p->np;

	if (!np)
		return;

	/* Clear the port's pointer before freeing so nothing can pick up
	 * the netpoll instance while it is being destroyed.
	 */
	p->np = NULL;

	__netpoll_free(np);
}
384
385#endif
386
/* ndo_add_slave: enslave @slave_dev to the bridge via br_add_if(). */
static int br_add_slave(struct net_device *dev, struct net_device *slave_dev,
			struct netlink_ext_ack *extack)
{
	return br_add_if(netdev_priv(dev), slave_dev, extack);
}
395
/* ndo_del_slave: release @slave_dev from the bridge via br_del_if(). */
static int br_del_slave(struct net_device *dev, struct net_device *slave_dev)
{
	return br_del_if(netdev_priv(dev), slave_dev);
}
402
/* ethtool operations exposed by the bridge master device. */
static const struct ethtool_ops br_ethtool_ops = {
	.get_drvinfo		 = br_getinfo,
	.get_link		 = ethtool_op_get_link,
	.get_link_ksettings	 = br_get_link_ksettings,
};
408
/* net_device_ops of the bridge master device; wires the core netdev
 * callbacks to the bridge implementations above and to the FDB/netlink
 * handlers defined elsewhere in the bridge module.
 */
static const struct net_device_ops br_netdev_ops = {
	.ndo_open		 = br_dev_open,
	.ndo_stop		 = br_dev_stop,
	.ndo_init		 = br_dev_init,
	.ndo_uninit		 = br_dev_uninit,
	.ndo_start_xmit		 = br_dev_xmit,
	.ndo_get_stats64	 = br_get_stats64,
	.ndo_set_mac_address	 = br_set_mac_address,
	.ndo_set_rx_mode	 = br_dev_set_multicast_list,
	.ndo_change_rx_flags	 = br_dev_change_rx_flags,
	.ndo_change_mtu		 = br_change_mtu,
	.ndo_do_ioctl		 = br_dev_ioctl,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_netpoll_setup	 = br_netpoll_setup,
	.ndo_netpoll_cleanup	 = br_netpoll_cleanup,
	.ndo_poll_controller	 = br_poll_controller,
#endif
	.ndo_add_slave		 = br_add_slave,
	.ndo_del_slave		 = br_del_slave,
	.ndo_fix_features	 = br_fix_features,
	.ndo_fdb_add		 = br_fdb_add,
	.ndo_fdb_del		 = br_fdb_delete,
	.ndo_fdb_dump		 = br_fdb_dump,
	.ndo_fdb_get		 = br_fdb_get,
	.ndo_bridge_getlink	 = br_getlink,
	.ndo_bridge_setlink	 = br_setlink,
	.ndo_bridge_dellink	 = br_dellink,
	.ndo_features_check	 = passthru_features_check,
};
438
/* Device type attached to every bridge netdev via SET_NETDEV_DEVTYPE(). */
static struct device_type br_type = {
	.name	= "bridge",
};
442
/* Initialize a freshly allocated bridge net_device and its private
 * struct net_bridge: ops, feature flags, locks, lists, bridge identity
 * and STP timer defaults. Called from the rtnl_link setup path before
 * the device is registered.
 */
void br_dev_setup(struct net_device *dev)
{
	struct net_bridge *br = netdev_priv(dev);

	/* Random MAC until the first port supplies one (see STP code). */
	eth_hw_addr_random(dev);
	ether_setup(dev);

	dev->netdev_ops = &br_netdev_ops;
	dev->needs_free_netdev = true;
	dev->ethtool_ops = &br_ethtool_ops;
	SET_NETDEV_DEVTYPE(dev, &br_type);
	dev->priv_flags = IFF_EBRIDGE | IFF_NO_QUEUE;

	dev->features = COMMON_FEATURES | NETIF_F_LLTX | NETIF_F_NETNS_LOCAL |
			NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
	dev->hw_features = COMMON_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
			   NETIF_F_HW_VLAN_STAG_TX;
	dev->vlan_features = COMMON_FEATURES;

	br->dev = dev;
	spin_lock_init(&br->lock);
	INIT_LIST_HEAD(&br->port_list);
	INIT_HLIST_HEAD(&br->fdb_list);
	spin_lock_init(&br->hash_lock);

	/* Default bridge priority 0x8000 (high byte first). */
	br->bridge_id.prio[0] = 0x80;
	br->bridge_id.prio[1] = 0x00;

	ether_addr_copy(br->group_addr, eth_stp_addr);

	br->stp_enabled = BR_NO_STP;
	br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
	br->group_fwd_mask_required = BR_GROUPFWD_DEFAULT;

	/* Classic STP timer defaults (in jiffies): 20s/2s/15s. */
	br->designated_root = br->bridge_id;
	br->bridge_max_age = br->max_age = 20 * HZ;
	br->bridge_hello_time = br->hello_time = 2 * HZ;
	br->bridge_forward_delay = br->forward_delay = 15 * HZ;
	br->bridge_ageing_time = br->ageing_time = BR_DEFAULT_AGEING_TIME;
	dev->max_mtu = ETH_MAX_MTU;

	br_netfilter_rtable_init(br);
	br_stp_timer_init(br);
	br_multicast_init(br);
	INIT_DELAYED_WORK(&br->gc_work, br_fdb_cleanup);
}