// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Handle incoming frames
 * Linux ethernet bridge
 *
 * Authors:
 * Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_bridge.h>
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
#include <net/netfilter/nf_queue.h>
#endif
#include <linux/neighbour.h>
#include <net/arp.h>
#include <net/dsa.h>
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
#include "br_private_tunnel.h"

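/* Final step of local delivery: drop the fake rtable that br_netfilter
 * may have attached to the skb, then hand it to the normal receive path.
 */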
static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	br_drop_fake_rtable(skb);
	return netif_receive_skb(skb);
}

static int br_pass_frame_up(struct sk_buff *skb)
{
	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(brdev);
	struct net_bridge_vlan_group *vg;

	dev_sw_netstats_rx_add(brdev, skb->len);

	vg = br_vlan_group_rcu(br);
	/* Bridge is just like any other port.  Make sure the
	 * packet is allowed except in promisc mode when someone
	 * may be running packet capture.
	 */
	if (!(brdev->flags & IFF_PROMISC) &&
	    !br_allowed_egress(vg, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	indev = skb->dev;
	skb->dev = brdev;
	skb = br_handle_vlan(br, NULL, vg, skb);
	if (!skb)
		return NET_RX_DROP;
	/* update the multicast stats if the packet is IGMP/MLD */
	br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
			   BR_MCAST_DIR_TX);

	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
		       dev_net(indev), NULL, skb, indev, NULL,
		       br_netif_receive_skb);
}

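/* Core of the bridge receive path: learn the source address, classify the
 * frame as unicast/multicast/broadcast, run multicast snooping and ARP/ND
 * suppression, then forward, flood and/or pass the frame up the stack.
 */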
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
	enum br_pkt_type pkt_type = BR_PKT_UNICAST;
	struct net_bridge_fdb_entry *dst = NULL;
	struct net_bridge_mcast_port *pmctx;
	struct net_bridge_mdb_entry *mdst;
	bool local_rcv, mcast_hit = false;
	struct net_bridge_mcast *brmctx;
	struct net_bridge_vlan *vlan;
	struct net_bridge *br;
	u16 vid = 0;
	u8 state;

	if (!p || p->state == BR_STATE_DISABLED)
		goto drop;

	brmctx = &p->br->multicast_ctx;
	pmctx = &p->multicast_ctx;
	state = p->state;
	if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid,
				&state, &vlan))
		goto out;

	nbp_switchdev_frame_mark(p, skb);

	/* insert into forwarding database after filtering to avoid spoofing */
	br = p->br;
	if (p->flags & BR_LEARNING)
		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);

	local_rcv = !!(br->dev->flags & IFF_PROMISC);
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		/* by definition the broadcast is also a multicast address */
		if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
			pkt_type = BR_PKT_BROADCAST;
			local_rcv = true;
		} else {
			pkt_type = BR_PKT_MULTICAST;
			if (br_multicast_rcv(&brmctx, &pmctx, vlan, skb, vid))
				goto drop;
		}
	}

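	/* Ports in LEARNING state update the FDB (above) but must neither
	 * forward nor locally deliver any frames.
	 */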
	if (state == BR_STATE_LEARNING)
		goto drop;

	BR_INPUT_SKB_CB(skb)->brdev = br->dev;
	BR_INPUT_SKB_CB(skb)->src_port_isolated = !!(p->flags & BR_ISOLATED);

	if (IS_ENABLED(CONFIG_INET) &&
	    (skb->protocol == htons(ETH_P_ARP) ||
	     skb->protocol == htons(ETH_P_RARP))) {
		br_do_proxy_suppress_arp(skb, br, vid, p);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
		struct nd_msg *msg, _msg;

		msg = br_is_nd_neigh_msg(skb, &_msg);
		if (msg)
			br_do_suppress_nd(skb, br, vid, p, msg);
	}

	switch (pkt_type) {
	case BR_PKT_MULTICAST:
		mdst = br_mdb_get(brmctx, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(brmctx, eth_hdr(skb), mdst)) {
			if ((mdst && mdst->host_joined) ||
			    br_multicast_is_router(brmctx, skb)) {
				local_rcv = true;
				br->dev->stats.multicast++;
			}
			mcast_hit = true;
		} else {
			local_rcv = true;
			br->dev->stats.multicast++;
		}
		break;
	case BR_PKT_UNICAST:
		dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
		break;
	default:
		break;
	}

	if (dst) {
		unsigned long now = jiffies;

		if (test_bit(BR_FDB_LOCAL, &dst->flags))
			return br_pass_frame_up(skb);

		if (now != dst->used)
			dst->used = now;
		br_forward(dst->dst, skb, local_rcv, false);
	} else {
		if (!mcast_hit)
			br_flood(br, skb, pkt_type, local_rcv, false);
		else
			br_multicast_flood(mdst, skb, brmctx, local_rcv, false);
	}

	if (local_rcv)
		return br_pass_frame_up(skb);

out:
	return 0;
drop:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);

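/* Refresh the FDB for frames that terminate on the bridge itself
 * (link-local addresses): the frame is not forwarded, but its source
 * should still be learned when port, VLAN and bridge settings allow it.
 */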
static void __br_handle_local_finish(struct sk_buff *skb)
{
	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
	u16 vid = 0;

	/* check if vlan is allowed, to avoid spoofing */
	if ((p->flags & BR_LEARNING) &&
	    nbp_state_should_learn(p) &&
	    !br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
	    br_should_learn(p, skb, &vid))
		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, 0);
}

/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__br_handle_local_finish(skb);

	/* return 1 to signal the okfn() was called so it's ok to use the skb */
	return 1;
}

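/* Open-coded NF_HOOK() for NF_BR_PRE_ROUTING: walking the hook entries by
 * hand lets an ebtables broute verdict (br_netfilter_broute set in the skb
 * control block) divert the frame back to the normal stack by returning
 * RX_HANDLER_PASS instead of finishing bridge processing.
 */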
static int nf_hook_bridge_pre(struct sk_buff *skb, struct sk_buff **pskb)
{
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	struct nf_hook_entries *e = NULL;
	struct nf_hook_state state;
	unsigned int verdict, i;
	struct net *net;
	int ret;

	net = dev_net(skb->dev);
#ifdef HAVE_JUMP_LABEL
	if (!static_key_false(&nf_hooks_needed[NFPROTO_BRIDGE][NF_BR_PRE_ROUTING]))
		goto frame_finish;
#endif

	e = rcu_dereference(net->nf.hooks_bridge[NF_BR_PRE_ROUTING]);
	if (!e)
		goto frame_finish;

	nf_hook_state_init(&state, NF_BR_PRE_ROUTING,
			   NFPROTO_BRIDGE, skb->dev, NULL, NULL,
			   net, br_handle_frame_finish);

	for (i = 0; i < e->num_hook_entries; i++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, &state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			if (BR_INPUT_SKB_CB(skb)->br_netfilter_broute) {
				*pskb = skb;
				return RX_HANDLER_PASS;
			}
			break;
		case NF_DROP:
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		case NF_QUEUE:
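			/* nf_queue() returns 1 only when queueing failed
			 * and the verdict carries
			 * NF_VERDICT_FLAG_QUEUE_BYPASS; resume the hook
			 * walk in that case.
			 */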
			ret = nf_queue(skb, &state, i, verdict);
			if (ret == 1)
				continue;
			return RX_HANDLER_CONSUMED;
		default: /* STOLEN */
			return RX_HANDLER_CONSUMED;
		}
	}
frame_finish:
	net = dev_net(skb->dev);
	br_handle_frame_finish(net, NULL, skb);
#else
	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
#endif
	return RX_HANDLER_CONSUMED;
}

/* Return 0 if the frame was not processed, otherwise 1
 * note: already called with rcu_read_lock
 */
static int br_process_frame_type(struct net_bridge_port *p,
				 struct sk_buff *skb)
{
	struct br_frame_type *tmp;

	hlist_for_each_entry_rcu(tmp, &p->br->frame_type_list, list)
		if (unlikely(tmp->type == skb->protocol))
			return tmp->frame_handler(p, skb);

	return 0;
}

/*
 * Return NULL if skb is handled
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
	struct net_bridge_port *p;
	struct sk_buff *skb = *pskb;
	const unsigned char *dest = eth_hdr(skb)->h_dest;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));

	p = br_port_get_rcu(skb->dev);
	if (p->flags & BR_VLAN_TUNNEL) {
		if (br_handle_ingress_vlan_tunnel(skb, p,
						  nbp_vlan_group_rcu(p)))
			goto drop;
	}

	if (unlikely(is_link_local_ether_addr(dest))) {
		u16 fwd_mask = p->br->group_fwd_mask_required;

		/*
		 * See IEEE 802.1D Table 7-10 Reserved addresses
		 *
		 * Assignment			Value
		 * Bridge Group Address		01-80-C2-00-00-00
		 * (MAC Control) 802.3		01-80-C2-00-00-01
		 * (Link Aggregation) 802.3	01-80-C2-00-00-02
		 * 802.1X PAE address		01-80-C2-00-00-03
		 *
		 * 802.1AB LLDP			01-80-C2-00-00-0E
		 *
		 * Others reserved for future standardization
		 */
		fwd_mask |= p->group_fwd_mask;
		switch (dest[5]) {
		case 0x00:	/* Bridge Group Address */
			/* If STP is turned off, then we must forward to
			 * keep loop detection.
			 */
			if (p->br->stp_enabled == BR_NO_STP ||
			    fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		case 0x01:	/* IEEE MAC (Pause) */
			goto drop;

		case 0x0E:	/* 802.1AB LLDP */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		default:
			/* Allow selective forwarding for most other protocols */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
		}

		/* The else clause is hit when nf_hook():
		 * - returns < 0 (drop/error)
		 * - returns == 0 (stolen/nf_queue)
		 * Thus the okfn() returns 1 to signal the skb is ok to pass
		 */
		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
			    dev_net(skb->dev), NULL, skb, skb->dev, NULL,
			    br_handle_local_finish) == 1) {
			return RX_HANDLER_PASS;
		} else {
			return RX_HANDLER_CONSUMED;
		}
	}

	if (unlikely(br_process_frame_type(p, skb)))
		return RX_HANDLER_PASS;

forward:
	switch (p->state) {
	case BR_STATE_FORWARDING:
	case BR_STATE_LEARNING:
		if (ether_addr_equal(p->br->dev->dev_addr, dest))
			skb->pkt_type = PACKET_HOST;

		return nf_hook_bridge_pre(skb, pskb);
	default:
drop:
		kfree_skb(skb);
	}
	return RX_HANDLER_CONSUMED;
}

/* This function has no purpose other than to appease the br_port_get_rcu/rtnl
 * helpers which identify bridged ports according to the rx_handler installed
 * on them (so there _needs_ to be a bridge rx_handler even if we don't need it
 * to do anything useful). This bridge won't support traffic to/from the stack,
 * but only hardware bridging. So return RX_HANDLER_PASS so we don't steal
 * frames from the ETH_P_XDSA packet_type handler.
 */
static rx_handler_result_t br_handle_frame_dummy(struct sk_buff **pskb)
{
	return RX_HANDLER_PASS;
}

rx_handler_func_t *br_get_rx_handler(const struct net_device *dev)
{
	if (netdev_uses_dsa(dev))
		return br_handle_frame_dummy;

	return br_handle_frame;
}

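/* br_add_frame()/br_del_frame() register and unregister per-bridge handlers
 * for a specific EtherType (in-tree users include MRP and CFM). The list is
 * walked under RCU in br_process_frame_type(); callers serialize
 * modifications (under rtnl in-tree).
 */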
void br_add_frame(struct net_bridge *br, struct br_frame_type *ft)
{
	hlist_add_head_rcu(&ft->list, &br->frame_type_list);
}

void br_del_frame(struct net_bridge *br, struct br_frame_type *ft)
{
	struct br_frame_type *tmp;

	hlist_for_each_entry(tmp, &br->frame_type_list, list)
		if (ft == tmp) {
			hlist_del_rcu(&ft->list);
			return;
		}
}