// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	Handle incoming frames
 *	Linux ethernet bridge
 *
 *	Authors:
 *	Lennert Buytenhek		<buytenh@gnu.org>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/netfilter_bridge.h>
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
#include <net/netfilter/nf_queue.h>
#endif
#include <linux/neighbour.h>
#include <net/arp.h>
#include <net/dsa.h>
#include <linux/export.h>
#include <linux/rculist.h>
#include "br_private.h"
#include "br_private_tunnel.h"

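/* Deliver the frame to the local network stack, after dropping any fake
 * rtable that br_netfilter may have attached to the skb.
 */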
static int
br_netif_receive_skb(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	br_drop_fake_rtable(skb);
	return netif_receive_skb(skb);
}

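/* Pass a frame up to the bridge device itself for local delivery: account
 * it in the bridge's rx stats, check VLAN egress policy, then run the
 * NF_BR_LOCAL_IN hook before handing the skb to the stack.
 */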
static int br_pass_frame_up(struct sk_buff *skb)
{
	struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(brdev);
	struct net_bridge_vlan_group *vg;
	struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);

	u64_stats_update_begin(&brstats->syncp);
	brstats->rx_packets++;
	brstats->rx_bytes += skb->len;
	u64_stats_update_end(&brstats->syncp);

	vg = br_vlan_group_rcu(br);
	/* Bridge is just like any other port.  Make sure the
	 * packet is allowed except in promisc mode when someone
	 * may be running packet capture.
	 */
	if (!(brdev->flags & IFF_PROMISC) &&
	    !br_allowed_egress(vg, skb)) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	indev = skb->dev;
	skb->dev = brdev;
	skb = br_handle_vlan(br, NULL, vg, skb);
	if (!skb)
		return NET_RX_DROP;
	/* update the multicast stats if the packet is IGMP/MLD */
	br_multicast_count(br, NULL, skb, br_multicast_igmp_type(skb),
			   BR_MCAST_DIR_TX);

	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
		       dev_net(indev), NULL, skb, indev, NULL,
		       br_netif_receive_skb);
}

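/* Main bridge input path: validate and learn the source address, apply
 * multicast snooping and ARP/ND suppression, then forward the frame to a
 * known destination port, flood it, and/or pass it up to the local stack.
 */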
/* note: already called with rcu_read_lock */
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
	enum br_pkt_type pkt_type = BR_PKT_UNICAST;
	struct net_bridge_fdb_entry *dst = NULL;
	struct net_bridge_mdb_entry *mdst;
	bool local_rcv, mcast_hit = false;
	struct net_bridge *br;
	u16 vid = 0;
	u8 state;

	if (!p || p->state == BR_STATE_DISABLED)
		goto drop;

	state = p->state;
	if (!br_allowed_ingress(p->br, nbp_vlan_group_rcu(p), skb, &vid,
				&state))
		goto out;

	nbp_switchdev_frame_mark(p, skb);

	/* insert into forwarding database after filtering to avoid spoofing */
	br = p->br;
	if (p->flags & BR_LEARNING)
		br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, 0);

	local_rcv = !!(br->dev->flags & IFF_PROMISC);
	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest)) {
		/* by definition the broadcast is also a multicast address */
		if (is_broadcast_ether_addr(eth_hdr(skb)->h_dest)) {
			pkt_type = BR_PKT_BROADCAST;
			local_rcv = true;
		} else {
			pkt_type = BR_PKT_MULTICAST;
			if (br_multicast_rcv(br, p, skb, vid))
				goto drop;
		}
	}

	if (state == BR_STATE_LEARNING)
		goto drop;

	BR_INPUT_SKB_CB(skb)->brdev = br->dev;
	BR_INPUT_SKB_CB(skb)->src_port_isolated = !!(p->flags & BR_ISOLATED);

	if (IS_ENABLED(CONFIG_INET) &&
	    (skb->protocol == htons(ETH_P_ARP) ||
	     skb->protocol == htons(ETH_P_RARP))) {
		br_do_proxy_suppress_arp(skb, br, vid, p);
	} else if (IS_ENABLED(CONFIG_IPV6) &&
		   skb->protocol == htons(ETH_P_IPV6) &&
		   br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED) &&
		   pskb_may_pull(skb, sizeof(struct ipv6hdr) +
				 sizeof(struct nd_msg)) &&
		   ipv6_hdr(skb)->nexthdr == IPPROTO_ICMPV6) {
		struct nd_msg *msg, _msg;

		msg = br_is_nd_neigh_msg(skb, &_msg);
		if (msg)
			br_do_suppress_nd(skb, br, vid, p, msg);
	}

	switch (pkt_type) {
	case BR_PKT_MULTICAST:
		mdst = br_mdb_get(br, skb, vid);
		if ((mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) &&
		    br_multicast_querier_exists(br, eth_hdr(skb))) {
			if ((mdst && mdst->host_joined) ||
			    br_multicast_is_router(br)) {
				local_rcv = true;
				br->dev->stats.multicast++;
			}
			mcast_hit = true;
		} else {
			local_rcv = true;
			br->dev->stats.multicast++;
		}
		break;
	case BR_PKT_UNICAST:
		dst = br_fdb_find_rcu(br, eth_hdr(skb)->h_dest, vid);
		break;
	default:
		break;
	}

	if (dst) {
		unsigned long now = jiffies;

		if (test_bit(BR_FDB_LOCAL, &dst->flags))
			return br_pass_frame_up(skb);

		if (now != dst->used)
			dst->used = now;
		br_forward(dst->dst, skb, local_rcv, false);
	} else {
		if (!mcast_hit)
			br_flood(br, skb, pkt_type, local_rcv, false);
		else
			br_multicast_flood(mdst, skb, local_rcv, false);
	}

	if (local_rcv)
		return br_pass_frame_up(skb);

out:
	return 0;
drop:
	kfree_skb(skb);
	goto out;
}
EXPORT_SYMBOL_GPL(br_handle_frame_finish);

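/* Learn the source address of a link-local frame when learning is enabled
 * and permitted; shared by br_handle_frame() and br_handle_local_finish().
 */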
static void __br_handle_local_finish(struct sk_buff *skb)
{
	struct net_bridge_port *p = br_port_get_rcu(skb->dev);
	u16 vid = 0;

	/* check if vlan is allowed, to avoid spoofing */
	if ((p->flags & BR_LEARNING) &&
	    nbp_state_should_learn(p) &&
	    !br_opt_get(p->br, BROPT_NO_LL_LEARN) &&
	    br_should_learn(p, skb, &vid))
		br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, 0);
}

/* note: already called with rcu_read_lock */
static int br_handle_local_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	__br_handle_local_finish(skb);

	/* return 1 to signal the okfn() was called so it's ok to use the skb */
	return 1;
}

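/* Open-coded equivalent of NF_HOOK() for NF_BR_PRE_ROUTING. The hook
 * entries are walked by index so that an NF_QUEUE verdict can resume
 * iteration at the next entry after reinjection, and an accepted frame
 * marked by ebtables broute is passed up unbridged.
 */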
static int nf_hook_bridge_pre(struct sk_buff *skb, struct sk_buff **pskb)
{
#ifdef CONFIG_NETFILTER_FAMILY_BRIDGE
	struct nf_hook_entries *e = NULL;
	struct nf_hook_state state;
	unsigned int verdict, i;
	struct net *net;
	int ret;

	net = dev_net(skb->dev);
#ifdef HAVE_JUMP_LABEL
	if (!static_key_false(&nf_hooks_needed[NFPROTO_BRIDGE][NF_BR_PRE_ROUTING]))
		goto frame_finish;
#endif

	e = rcu_dereference(net->nf.hooks_bridge[NF_BR_PRE_ROUTING]);
	if (!e)
		goto frame_finish;

	nf_hook_state_init(&state, NF_BR_PRE_ROUTING,
			   NFPROTO_BRIDGE, skb->dev, NULL, NULL,
			   net, br_handle_frame_finish);

	for (i = 0; i < e->num_hook_entries; i++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[i], skb, &state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			if (BR_INPUT_SKB_CB(skb)->br_netfilter_broute) {
				*pskb = skb;
				return RX_HANDLER_PASS;
			}
			break;
		case NF_DROP:
			kfree_skb(skb);
			return RX_HANDLER_CONSUMED;
		case NF_QUEUE:
			ret = nf_queue(skb, &state, i, verdict);
			if (ret == 1)
				continue;
			return RX_HANDLER_CONSUMED;
		default:		/* STOLEN */
			return RX_HANDLER_CONSUMED;
		}
	}
frame_finish:
	net = dev_net(skb->dev);
	br_handle_frame_finish(net, NULL, skb);
#else
	br_handle_frame_finish(dev_net(skb->dev), NULL, skb);
#endif
	return RX_HANDLER_CONSUMED;
}

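/* Dispatch the frame to a handler registered for its EtherType with
 * br_add_frame(), if any (used, for example, by the MRP code).
 */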
/* Return 0 if the frame was not processed, otherwise 1
 * note: already called with rcu_read_lock
 */
static int br_process_frame_type(struct net_bridge_port *p,
				 struct sk_buff *skb)
{
	struct br_frame_type *tmp;

	hlist_for_each_entry_rcu(tmp, &p->br->frame_type_list, list)
		if (unlikely(tmp->type == skb->protocol))
			return tmp->frame_handler(p, skb);

	return 0;
}

/*
 * Return RX_HANDLER_CONSUMED if the skb was handled (forwarded, queued
 * or freed), or RX_HANDLER_PASS to let the stack process it normally.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t br_handle_frame(struct sk_buff **pskb)
{
	struct net_bridge_port *p;
	struct sk_buff *skb = *pskb;
	const unsigned char *dest = eth_hdr(skb)->h_dest;

	if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
		return RX_HANDLER_PASS;

	if (!is_valid_ether_addr(eth_hdr(skb)->h_source))
		goto drop;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		return RX_HANDLER_CONSUMED;

	memset(skb->cb, 0, sizeof(struct br_input_skb_cb));

	p = br_port_get_rcu(skb->dev);
	if (p->flags & BR_VLAN_TUNNEL) {
		if (br_handle_ingress_vlan_tunnel(skb, p,
						  nbp_vlan_group_rcu(p)))
			goto drop;
	}

	if (unlikely(is_link_local_ether_addr(dest))) {
		u16 fwd_mask = p->br->group_fwd_mask_required;

		/*
		 * See IEEE 802.1D Table 7-10 Reserved addresses
		 *
		 * Assignment			Value
		 * Bridge Group Address		01-80-C2-00-00-00
		 * (MAC Control) 802.3		01-80-C2-00-00-01
		 * (Link Aggregation) 802.3	01-80-C2-00-00-02
		 * 802.1X PAE address		01-80-C2-00-00-03
		 *
		 * 802.1AB LLDP			01-80-C2-00-00-0E
		 *
		 * Others reserved for future standardization
		 */
		fwd_mask |= p->group_fwd_mask;
		switch (dest[5]) {
		case 0x00:	/* Bridge Group Address */
			/* If STP is turned off, we must still forward
			 * these frames to keep loop detection working.
			 */
			if (p->br->stp_enabled == BR_NO_STP ||
			    fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		case 0x01:	/* IEEE MAC (Pause) */
			goto drop;

		case 0x0E:	/* 802.1AB LLDP */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
			*pskb = skb;
			__br_handle_local_finish(skb);
			return RX_HANDLER_PASS;

		default:
			/* Allow selective forwarding for most other protocols */
			fwd_mask |= p->br->group_fwd_mask;
			if (fwd_mask & (1u << dest[5]))
				goto forward;
		}

		/* The else clause should be hit when nf_hook():
		 *   - returns < 0 (drop/error)
		 *   - returns = 0 (stolen/nf_queue)
		 * Thus return 1 from the okfn() to signal the skb is ok to pass
		 */
		if (NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN,
			    dev_net(skb->dev), NULL, skb, skb->dev, NULL,
			    br_handle_local_finish) == 1) {
			return RX_HANDLER_PASS;
		} else {
			return RX_HANDLER_CONSUMED;
		}
	}

	if (unlikely(br_process_frame_type(p, skb)))
		return RX_HANDLER_PASS;

forward:
	switch (p->state) {
	case BR_STATE_FORWARDING:
	case BR_STATE_LEARNING:
		if (ether_addr_equal(p->br->dev->dev_addr, dest))
			skb->pkt_type = PACKET_HOST;

		return nf_hook_bridge_pre(skb, pskb);
	default:
drop:
		kfree_skb(skb);
	}
	return RX_HANDLER_CONSUMED;
}

/* This function has no purpose other than to appease the br_port_get_rcu/rtnl
 * helpers which identify bridged ports according to the rx_handler installed
 * on them (so there _needs_ to be a bridge rx_handler even if we don't need it
 * to do anything useful). This bridge won't support traffic to/from the stack,
 * but only hardware bridging. So return RX_HANDLER_PASS so we don't steal
 * frames from the ETH_P_XDSA packet_type handler.
 */
static rx_handler_result_t br_handle_frame_dummy(struct sk_buff **pskb)
{
	return RX_HANDLER_PASS;
}

rx_handler_func_t *br_get_rx_handler(const struct net_device *dev)
{
	if (netdev_uses_dsa(dev))
		return br_handle_frame_dummy;

	return br_handle_frame;
}
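
/* br_add_frame()/br_del_frame() let bridge subsystems register and
 * unregister a handler for a specific EtherType; matching frames are
 * diverted to it from br_handle_frame() via br_process_frame_type().
 */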
void br_add_frame(struct net_bridge *br, struct br_frame_type *ft)
{
	hlist_add_head_rcu(&ft->list, &br->frame_type_list);
}

void br_del_frame(struct net_bridge *br, struct br_frame_type *ft)
{
	struct br_frame_type *tmp;

	hlist_for_each_entry(tmp, &br->frame_type_list, list)
		if (ft == tmp) {
			hlist_del_rcu(&ft->list);
			return;
		}
}
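
/* Usage sketch (illustrative, not part of this file): a consumer such as
 * the MRP code registers a frame type roughly like this; the EtherType
 * and handler names below are placeholders. The handler returns 1 if it
 * consumed the skb, 0 to let normal bridge processing continue:
 *
 *	static int my_frame_handler(struct net_bridge_port *p,
 *				    struct sk_buff *skb)
 *	{
 *		...
 *	}
 *
 *	static struct br_frame_type my_frame_type __read_mostly = {
 *		.type = cpu_to_be16(ETH_P_EXAMPLE),
 *		.frame_handler = my_frame_handler,
 *	};
 *
 *	br_add_frame(br, &my_frame_type);
 *	...
 *	br_del_frame(br, &my_frame_type);
 */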