// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */

#include "soft-interface.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/percpu.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bat_algo.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "multicast.h"
#include "network-coding.h"
#include "originator.h"
#include "send.h"
#include "translation-table.h"

/**
 * batadv_skb_head_push() - Increase header size and move (push) head pointer
 * @skb: packet buffer which should be modified
 * @len: number of bytes to add
 *
 * Return: 0 on success or negative error number in case of failure
 */
int batadv_skb_head_push(struct sk_buff *skb, unsigned int len)
{
	int result;

	/* TODO: We must check if we can release all references to non-payload
	 * data using __skb_header_release in our skbs to allow skb_cow_header
	 * to work optimally. This means that those skbs are not allowed to read
	 * or write any data which is before the current position of skb->data
	 * after that call and thus allow other skbs with the same data buffer
	 * to write freely in that area.
	 */
	result = skb_cow_head(skb, len);
	if (result < 0)
		return result;

	skb_push(skb, len);
	return 0;
}

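/**
 * batadv_interface_open() - Open the batman-adv soft interface
 * @dev: net device which is being opened
 *
 * Starts the transmit queue of @dev.
 *
 * Return: always 0
 */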
static int batadv_interface_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}

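/**
 * batadv_interface_release() - Close the batman-adv soft interface
 * @dev: net device which is being closed
 *
 * Stops the transmit queue of @dev.
 *
 * Return: always 0
 */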
static int batadv_interface_release(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}

/**
 * batadv_sum_counter() - Sum the cpu-local counters for index 'idx'
 * @bat_priv: the bat priv with all the soft interface information
 * @idx: index of counter to sum up
 *
 * Return: sum of all cpu-local counters
 */
static u64 batadv_sum_counter(struct batadv_priv *bat_priv, size_t idx)
{
	u64 *counters, sum = 0;
	int cpu;

	for_each_possible_cpu(cpu) {
		counters = per_cpu_ptr(bat_priv->bat_counters, cpu);
		sum += counters[idx];
	}

	return sum;
}

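/**
 * batadv_interface_stats() - Retrieve transmit/receive statistics
 * @dev: soft interface net device
 *
 * Sums up the per-cpu counters and stores the results in the statistics
 * embedded in @dev.
 *
 * Return: pointer to the statistics of @dev
 */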
static struct net_device_stats *batadv_interface_stats(struct net_device *dev)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct net_device_stats *stats = &dev->stats;

	stats->tx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_TX);
	stats->tx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_TX_BYTES);
	stats->tx_dropped = batadv_sum_counter(bat_priv, BATADV_CNT_TX_DROPPED);
	stats->rx_packets = batadv_sum_counter(bat_priv, BATADV_CNT_RX);
	stats->rx_bytes = batadv_sum_counter(bat_priv, BATADV_CNT_RX_BYTES);
	return stats;
}

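/**
 * batadv_interface_set_mac_addr() - Change the MAC address of the soft
 *  interface
 * @dev: soft interface net device
 * @p: new address (struct sockaddr) to assign
 *
 * Replaces the old hardware address and, once the mesh is active, updates the
 * local translation table entries of every configured VLAN accordingly.
 *
 * Return: 0 on success or -EADDRNOTAVAIL if the new address is invalid
 */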
static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;
	struct sockaddr *addr = p;
	u8 old_addr[ETH_ALEN];

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	ether_addr_copy(old_addr, dev->dev_addr);
	eth_hw_addr_set(dev, addr->sa_data);

	/* only modify transtable if it has been initialized before */
	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		return 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(vlan, &bat_priv->softif_vlan_list, list) {
		batadv_tt_local_remove(bat_priv, old_addr, vlan->vid,
				       "mac address changed", false);
		batadv_tt_local_add(dev, addr->sa_data, vlan->vid,
				    BATADV_NULL_IFINDEX, BATADV_NO_MARK);
	}
	rcu_read_unlock();

	return 0;
}

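/**
 * batadv_interface_change_mtu() - Change the MTU of the soft interface
 * @dev: soft interface net device
 * @new_mtu: requested MTU
 *
 * Return: 0 on success, -EINVAL if @new_mtu is below 68 bytes or above the
 * maximum MTU supported by the attached hard interfaces
 */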
static int batadv_interface_change_mtu(struct net_device *dev, int new_mtu)
{
	/* check ranges */
	if (new_mtu < 68 || new_mtu > batadv_hardif_min_mtu(dev))
		return -EINVAL;

	dev->mtu = new_mtu;

	return 0;
}

/**
 * batadv_interface_set_rx_mode() - set the rx mode of a device
 * @dev: registered network device to modify
 *
 * We do not actually need to set any rx filters for the virtual batman
 * soft interface. However, a dummy handler enables a user to set static
 * multicast listeners, for instance.
 */
static void batadv_interface_set_rx_mode(struct net_device *dev)
{
}

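/**
 * batadv_interface_tx() - Transmit handler of the soft interface
 * @skb: ethernet frame handed over by the upper layers
 * @soft_iface: soft interface which should transmit @skb
 *
 * Encapsulates the frame in a batman-adv packet and either broadcasts it into
 * the mesh or forwards it as unicast via the translation table, gateway or
 * multicast handling.
 *
 * Return: always NETDEV_TX_OK; @skb is consumed (sent or dropped)
 */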
static netdev_tx_t batadv_interface_tx(struct sk_buff *skb,
				       struct net_device *soft_iface)
{
	struct ethhdr *ethhdr;
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct batadv_hard_iface *primary_if = NULL;
	struct batadv_bcast_packet *bcast_packet;
	static const u8 stp_addr[ETH_ALEN] = {0x01, 0x80, 0xC2, 0x00,
					      0x00, 0x00};
	static const u8 ectp_addr[ETH_ALEN] = {0xCF, 0x00, 0x00, 0x00,
					       0x00, 0x00};
	enum batadv_dhcp_recipient dhcp_rcp = BATADV_DHCP_NO;
	u8 *dst_hint = NULL, chaddr[ETH_ALEN];
	struct vlan_ethhdr *vhdr;
	unsigned int header_len = 0;
	int data_len = skb->len, ret;
	unsigned long brd_delay = 0;
	bool do_bcast = false, client_added;
	unsigned short vid;
	u32 seqno;
	int gw_mode;
	enum batadv_forw_mode forw_mode = BATADV_FORW_SINGLE;
	struct batadv_orig_node *mcast_single_orig = NULL;
	int mcast_is_routable = 0;
	int network_offset = ETH_HLEN;
	__be16 proto;

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto dropped;

	/* reset control block to avoid left overs from previous users */
	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

	netif_trans_update(soft_iface);
	vid = batadv_get_vid(skb, 0);

	skb_reset_mac_header(skb);
	ethhdr = eth_hdr(skb);

	proto = ethhdr->h_proto;

	switch (ntohs(proto)) {
	case ETH_P_8021Q:
		if (!pskb_may_pull(skb, sizeof(*vhdr)))
			goto dropped;
		vhdr = vlan_eth_hdr(skb);
		proto = vhdr->h_vlan_encapsulated_proto;

		/* drop batman-in-batman packets to prevent loops */
		if (proto != htons(ETH_P_BATMAN)) {
			network_offset += VLAN_HLEN;
			break;
		}

		fallthrough;
	case ETH_P_BATMAN:
		goto dropped;
	}

	skb_set_network_header(skb, network_offset);

	if (batadv_bla_tx(bat_priv, skb, vid))
		goto dropped;

	/* skb->data might have been reallocated by batadv_bla_tx() */
	ethhdr = eth_hdr(skb);

	/* Register the client MAC in the transtable */
	if (!is_multicast_ether_addr(ethhdr->h_source) &&
	    !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) {
		client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
						   vid, skb->skb_iif,
						   skb->mark);
		if (!client_added)
			goto dropped;
	}

	/* Snoop address candidates from DHCPACKs for early DAT filling */
	batadv_dat_snoop_outgoing_dhcp_ack(bat_priv, skb, proto, vid);

	/* don't accept stp packets. STP does not help in meshes.
	 * better use the bridge loop avoidance ...
	 *
	 * The same goes for ECTP sent at least by some Cisco Switches,
	 * it might confuse the mesh when used with bridge loop avoidance.
	 */
	if (batadv_compare_eth(ethhdr->h_dest, stp_addr))
		goto dropped;

	if (batadv_compare_eth(ethhdr->h_dest, ectp_addr))
		goto dropped;

	gw_mode = atomic_read(&bat_priv->gw.mode);
	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* if gw mode is off, broadcast every packet */
		if (gw_mode == BATADV_GW_MODE_OFF) {
			do_bcast = true;
			goto send;
		}

		dhcp_rcp = batadv_gw_dhcp_recipient_get(skb, &header_len,
							chaddr);
		/* skb->data may have been modified by
		 * batadv_gw_dhcp_recipient_get()
		 */
		ethhdr = eth_hdr(skb);
		/* if gw_mode is on, broadcast any non-DHCP message.
		 * All the DHCP packets are going to be sent as unicast
		 */
		if (dhcp_rcp == BATADV_DHCP_NO) {
			do_bcast = true;
			goto send;
		}

		if (dhcp_rcp == BATADV_DHCP_TO_CLIENT)
			dst_hint = chaddr;
		else if ((gw_mode == BATADV_GW_MODE_SERVER) &&
			 (dhcp_rcp == BATADV_DHCP_TO_SERVER))
			/* gateways should not forward any DHCP message if
			 * directed to a DHCP server
			 */
			goto dropped;

send:
		if (do_bcast && !is_broadcast_ether_addr(ethhdr->h_dest)) {
			forw_mode = batadv_mcast_forw_mode(bat_priv, skb,
							   &mcast_single_orig,
							   &mcast_is_routable);
			if (forw_mode == BATADV_FORW_NONE)
				goto dropped;

			if (forw_mode == BATADV_FORW_SINGLE ||
			    forw_mode == BATADV_FORW_SOME)
				do_bcast = false;
		}
	}

	batadv_skb_set_priority(skb, 0);

	/* ethernet packet should be broadcasted */
	if (do_bcast) {
		primary_if = batadv_primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto dropped;

		/* in case of an ARP request, we do not immediately broadcast
		 * the packet, instead we first wait for DAT to try to
		 * retrieve the correct ARP entry
		 */
		if (batadv_dat_snoop_outgoing_arp_request(bat_priv, skb))
			brd_delay = msecs_to_jiffies(ARP_REQ_DELAY);

		if (batadv_skb_head_push(skb, sizeof(*bcast_packet)) < 0)
			goto dropped;

		bcast_packet = (struct batadv_bcast_packet *)skb->data;
		bcast_packet->version = BATADV_COMPAT_VERSION;
		bcast_packet->ttl = BATADV_TTL - 1;

		/* batman packet type: broadcast */
		bcast_packet->packet_type = BATADV_BCAST;
		bcast_packet->reserved = 0;

		/* hw address of first interface is the orig mac because only
		 * this mac is known throughout the mesh
		 */
		ether_addr_copy(bcast_packet->orig,
				primary_if->net_dev->dev_addr);

		/* set broadcast sequence number */
		seqno = atomic_inc_return(&bat_priv->bcast_seqno);
		bcast_packet->seqno = htonl(seqno);

		batadv_send_bcast_packet(bat_priv, skb, brd_delay, true);
	/* unicast packet */
	} else {
		/* DHCP packets going to a server will use the GW feature */
		if (dhcp_rcp == BATADV_DHCP_TO_SERVER) {
			ret = batadv_gw_out_of_range(bat_priv, skb);
			if (ret)
				goto dropped;
			ret = batadv_send_skb_via_gw(bat_priv, skb, vid);
		} else if (mcast_single_orig) {
			ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid,
							  mcast_single_orig);
		} else if (forw_mode == BATADV_FORW_SOME) {
			ret = batadv_mcast_forw_send(bat_priv, skb, vid,
						     mcast_is_routable);
		} else {
			if (batadv_dat_snoop_outgoing_arp_request(bat_priv,
								  skb))
				goto dropped;

			batadv_dat_snoop_outgoing_arp_reply(bat_priv, skb);

			ret = batadv_send_skb_via_tt(bat_priv, skb, dst_hint,
						     vid);
		}
		if (ret != NET_XMIT_SUCCESS)
			goto dropped_freed;
	}

	batadv_inc_counter(bat_priv, BATADV_CNT_TX);
	batadv_add_counter(bat_priv, BATADV_CNT_TX_BYTES, data_len);
	goto end;

dropped:
	kfree_skb(skb);
dropped_freed:
	batadv_inc_counter(bat_priv, BATADV_CNT_TX_DROPPED);
end:
	batadv_orig_node_put(mcast_single_orig);
	batadv_hardif_put(primary_if);
	return NETDEV_TX_OK;
}

/**
 * batadv_interface_rx() - receive ethernet frame on local batman-adv interface
 * @soft_iface: local interface which will receive the ethernet frame
 * @skb: ethernet frame for @soft_iface
 * @hdr_size: size of already parsed batman-adv header
 * @orig_node: originator from which the batman-adv packet was sent
 *
 * Sends an ethernet frame to the receive path of the local @soft_iface.
 * skb->data still points to the batman-adv header of size @hdr_size. The
 * caller has to have parsed this header already and made sure that at least
 * @hdr_size bytes are still available for pull in @skb.
 *
 * The packet may still get dropped. This can happen when the encapsulated
 * ethernet frame is invalid or again contains a batman-adv packet. Unicast
 * packets are also dropped directly when they were sent between two isolated
 * clients.
 */
void batadv_interface_rx(struct net_device *soft_iface,
			 struct sk_buff *skb, int hdr_size,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_bcast_packet *batadv_bcast_packet;
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	struct vlan_ethhdr *vhdr;
	struct ethhdr *ethhdr;
	unsigned short vid;
	int packet_type;

	batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
	packet_type = batadv_bcast_packet->packet_type;

	skb_pull_rcsum(skb, hdr_size);
	skb_reset_mac_header(skb);

	/* clean the netfilter state now that the batman-adv header has been
	 * removed
	 */
	nf_reset_ct(skb);

	if (unlikely(!pskb_may_pull(skb, ETH_HLEN)))
		goto dropped;

	vid = batadv_get_vid(skb, 0);
	ethhdr = eth_hdr(skb);

	switch (ntohs(ethhdr->h_proto)) {
	case ETH_P_8021Q:
		if (!pskb_may_pull(skb, VLAN_ETH_HLEN))
			goto dropped;

		vhdr = (struct vlan_ethhdr *)skb->data;

		/* drop batman-in-batman packets to prevent loops */
		if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN))
			break;

		fallthrough;
	case ETH_P_BATMAN:
		goto dropped;
	}

	/* skb->dev & skb->pkt_type are set here */
	skb->protocol = eth_type_trans(skb, soft_iface);
	skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);

	batadv_inc_counter(bat_priv, BATADV_CNT_RX);
	batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
			   skb->len + ETH_HLEN);

	/* Let the bridge loop avoidance check the packet. If it will
	 * not handle it, we can safely push it up.
	 */
	if (batadv_bla_rx(bat_priv, skb, vid, packet_type))
		goto out;

	if (orig_node)
		batadv_tt_add_temporary_global_entry(bat_priv, orig_node,
						     ethhdr->h_source, vid);

	if (is_multicast_ether_addr(ethhdr->h_dest)) {
		/* set the mark on broadcast packets if AP isolation is ON and
		 * the packet is coming from an "isolated" client
		 */
		if (batadv_vlan_ap_isola_get(bat_priv, vid) &&
		    batadv_tt_global_is_isolated(bat_priv, ethhdr->h_source,
						 vid)) {
			/* save bits in skb->mark not covered by the mask and
			 * apply the mark on the rest
			 */
			skb->mark &= ~bat_priv->isolation_mark_mask;
			skb->mark |= bat_priv->isolation_mark;
		}
	} else if (batadv_is_ap_isolated(bat_priv, ethhdr->h_source,
					 ethhdr->h_dest, vid)) {
		goto dropped;
	}

	netif_rx(skb);
	goto out;

dropped:
	kfree_skb(skb);
out:
	return;
}

/**
 * batadv_softif_vlan_release() - release vlan from lists and queue for free
 *  after rcu grace period
 * @ref: kref pointer of the vlan object
 */
void batadv_softif_vlan_release(struct kref *ref)
{
	struct batadv_softif_vlan *vlan;

	vlan = container_of(ref, struct batadv_softif_vlan, refcount);

	spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
	hlist_del_rcu(&vlan->list);
	spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);

	kfree_rcu(vlan, rcu);
}

/**
 * batadv_softif_vlan_get() - get the vlan object for a specific vid
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the identifier of the vlan object to retrieve
 *
 * Return: the private data of the vlan matching the vid passed as argument or
 * NULL otherwise. The refcounter of the returned object is incremented by 1.
 */
struct batadv_softif_vlan *batadv_softif_vlan_get(struct batadv_priv *bat_priv,
						  unsigned short vid)
{
	struct batadv_softif_vlan *vlan_tmp, *vlan = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(vlan_tmp, &bat_priv->softif_vlan_list, list) {
		if (vlan_tmp->vid != vid)
			continue;

		if (!kref_get_unless_zero(&vlan_tmp->refcount))
			continue;

		vlan = vlan_tmp;
		break;
	}
	rcu_read_unlock();

	return vlan;
}

/**
 * batadv_softif_create_vlan() - allocate the needed resources for a new vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier
 *
 * Return: 0 on success, a negative error otherwise.
 */
int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
{
	struct batadv_softif_vlan *vlan;

	spin_lock_bh(&bat_priv->softif_vlan_list_lock);

	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (vlan) {
		batadv_softif_vlan_put(vlan);
		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
		return -EEXIST;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan) {
		spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
		return -ENOMEM;
	}

	vlan->bat_priv = bat_priv;
	vlan->vid = vid;
	kref_init(&vlan->refcount);

	atomic_set(&vlan->ap_isolation, 0);

	kref_get(&vlan->refcount);
	hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
	spin_unlock_bh(&bat_priv->softif_vlan_list_lock);

	/* add a new TT local entry. This one will be marked with the NOPURGE
	 * flag
	 */
	batadv_tt_local_add(bat_priv->soft_iface,
			    bat_priv->soft_iface->dev_addr, vid,
			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);

	/* don't return reference to new softif_vlan */
	batadv_softif_vlan_put(vlan);

	return 0;
}

/**
 * batadv_softif_destroy_vlan() - remove and destroy a softif_vlan object
 * @bat_priv: the bat priv with all the soft interface information
 * @vlan: the object to remove
 */
static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
				       struct batadv_softif_vlan *vlan)
{
	/* explicitly remove the associated TT local entry because it is marked
	 * with the NOPURGE flag
	 */
	batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
			       vlan->vid, "vlan interface destroyed", false);

	batadv_softif_vlan_put(vlan);
}

/**
 * batadv_interface_add_vid() - ndo_add_vid API implementation
 * @dev: the netdev of the mesh interface
 * @proto: protocol of the vlan id
 * @vid: identifier of the new vlan
 *
 * Set up all the internal structures for handling the new vlan on top of the
 * mesh interface
 *
 * Return: 0 on success or a negative error code in case of failure.
 */
static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
				    unsigned short vid)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;

	/* only 802.1Q vlans are supported.
	 * batman-adv does not know how to handle other types
	 */
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	vid |= BATADV_VLAN_HAS_TAG;

	/* if a new vlan is getting created and it already exists, it means that
	 * it was not deleted yet. batadv_softif_vlan_get() increases the
	 * refcount in order to revive the object.
	 *
	 * if it does not exist then create it.
	 */
	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (!vlan)
		return batadv_softif_create_vlan(bat_priv, vid);

	/* add a new TT local entry. This one will be marked with the NOPURGE
	 * flag. This must be added again, even if the vlan object already
	 * exists, because the entry was deleted by kill_vid()
	 */
	batadv_tt_local_add(bat_priv->soft_iface,
			    bat_priv->soft_iface->dev_addr, vid,
			    BATADV_NULL_IFINDEX, BATADV_NO_MARK);

	return 0;
}

/**
 * batadv_interface_kill_vid() - ndo_kill_vid API implementation
 * @dev: the netdev of the mesh interface
 * @proto: protocol of the vlan id
 * @vid: identifier of the deleted vlan
 *
 * Destroy all the internal structures used to handle the vlan identified by vid
 * on top of the mesh interface
 *
 * Return: 0 on success, -EINVAL if the specified prototype is not ETH_P_8021Q
 * or -ENOENT if the specified vlan id wasn't registered.
 */
static int batadv_interface_kill_vid(struct net_device *dev, __be16 proto,
				     unsigned short vid)
{
	struct batadv_priv *bat_priv = netdev_priv(dev);
	struct batadv_softif_vlan *vlan;

	/* only 802.1Q vlans are supported. batman-adv does not know how to
	 * handle other types
	 */
	if (proto != htons(ETH_P_8021Q))
		return -EINVAL;

	vlan = batadv_softif_vlan_get(bat_priv, vid | BATADV_VLAN_HAS_TAG);
	if (!vlan)
		return -ENOENT;

	batadv_softif_destroy_vlan(bat_priv, vlan);

	/* finally free the vlan object */
	batadv_softif_vlan_put(vlan);

	return 0;
}

/* batman-adv network devices have devices nesting below it and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key batadv_netdev_xmit_lock_key;
static struct lock_class_key batadv_netdev_addr_lock_key;

/**
 * batadv_set_lockdep_class_one() - Set lockdep class for a single tx queue
 * @dev: device which owns the tx queue
 * @txq: tx queue to modify
 * @_unused: always NULL
 */
static void batadv_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &batadv_netdev_xmit_lock_key);
}

/**
 * batadv_set_lockdep_class() - Set txq and addr_list lockdep class
 * @dev: network device to modify
 */
static void batadv_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &batadv_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, batadv_set_lockdep_class_one, NULL);
}

/**
 * batadv_softif_init_late() - late stage initialization of soft interface
 * @dev: registered network device to modify
 *
 * Return: error code on failures
 */
static int batadv_softif_init_late(struct net_device *dev)
{
	struct batadv_priv *bat_priv;
	u32 random_seqno;
	int ret;
	size_t cnt_len = sizeof(u64) * BATADV_CNT_NUM;

	batadv_set_lockdep_class(dev);

	bat_priv = netdev_priv(dev);
	bat_priv->soft_iface = dev;

	/* batadv_interface_stats() needs to be available as soon as
	 * register_netdevice() has been called
	 */
	bat_priv->bat_counters = __alloc_percpu(cnt_len, __alignof__(u64));
	if (!bat_priv->bat_counters)
		return -ENOMEM;

	atomic_set(&bat_priv->aggregated_ogms, 1);
	atomic_set(&bat_priv->bonding, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
	atomic_set(&bat_priv->bridge_loop_avoidance, 1);
#endif
#ifdef CONFIG_BATMAN_ADV_DAT
	atomic_set(&bat_priv->distributed_arp_table, 1);
#endif
#ifdef CONFIG_BATMAN_ADV_MCAST
	atomic_set(&bat_priv->multicast_mode, 1);
	atomic_set(&bat_priv->multicast_fanout, 16);
	atomic_set(&bat_priv->mcast.num_want_all_unsnoopables, 0);
	atomic_set(&bat_priv->mcast.num_want_all_ipv4, 0);
	atomic_set(&bat_priv->mcast.num_want_all_ipv6, 0);
#endif
	atomic_set(&bat_priv->gw.mode, BATADV_GW_MODE_OFF);
	atomic_set(&bat_priv->gw.bandwidth_down, 100);
	atomic_set(&bat_priv->gw.bandwidth_up, 20);
	atomic_set(&bat_priv->orig_interval, 1000);
	atomic_set(&bat_priv->hop_penalty, 30);
#ifdef CONFIG_BATMAN_ADV_DEBUG
	atomic_set(&bat_priv->log_level, 0);
#endif
	atomic_set(&bat_priv->fragmentation, 1);
	atomic_set(&bat_priv->packet_size_max, ETH_DATA_LEN);
	atomic_set(&bat_priv->bcast_queue_left, BATADV_BCAST_QUEUE_LEN);
	atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
	atomic_set(&bat_priv->bcast_seqno, 1);
	atomic_set(&bat_priv->tt.vn, 0);
	atomic_set(&bat_priv->tt.local_changes, 0);
	atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
#ifdef CONFIG_BATMAN_ADV_BLA
	atomic_set(&bat_priv->bla.num_requests, 0);
#endif
	atomic_set(&bat_priv->tp_num, 0);

	bat_priv->tt.last_changeset = NULL;
	bat_priv->tt.last_changeset_len = 0;
	bat_priv->isolation_mark = 0;
	bat_priv->isolation_mark_mask = 0;

	/* randomize initial seqno to avoid collision */
	get_random_bytes(&random_seqno, sizeof(random_seqno));
	atomic_set(&bat_priv->frag_seqno, random_seqno);

	bat_priv->primary_if = NULL;

	batadv_nc_init_bat_priv(bat_priv);

	if (!bat_priv->algo_ops) {
		ret = batadv_algo_select(bat_priv, batadv_routing_algo);
		if (ret < 0)
			goto free_bat_counters;
	}

	ret = batadv_mesh_init(dev);
	if (ret < 0)
		goto free_bat_counters;

	return 0;

free_bat_counters:
	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	return ret;
}

Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 820 | /** |
Sven Eckelmann | 7e9a8c2 | 2017-12-02 19:51:47 +0100 | [diff] [blame] | 821 | * batadv_softif_slave_add() - Add a slave interface to a batadv_soft_interface |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 822 | * @dev: batadv_soft_interface used as master interface |
| 823 | * @slave_dev: net_device which should become the slave interface |
Sven Eckelmann | bad5680 | 2017-10-07 14:21:22 +0200 | [diff] [blame] | 824 | * @extack: extended ACK report struct |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 825 | * |
Sven Eckelmann | 62fe710 | 2015-09-15 19:00:48 +0200 | [diff] [blame] | 826 | * Return: 0 if successful or error otherwise. |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 827 | */ |
| 828 | static int batadv_softif_slave_add(struct net_device *dev, |
David Ahern | 33eaf2a | 2017-10-04 17:48:46 -0700 | [diff] [blame] | 829 | struct net_device *slave_dev, |
| 830 | struct netlink_ext_ack *extack) |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 831 | { |
| 832 | struct batadv_hard_iface *hard_iface; |
| 833 | int ret = -EINVAL; |
| 834 | |
| 835 | hard_iface = batadv_hardif_get_by_netdev(slave_dev); |
Marek Lindner | 00f548b | 2015-02-18 22:17:30 +0800 | [diff] [blame] | 836 | if (!hard_iface || hard_iface->soft_iface) |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 837 | goto out; |
| 838 | |
Sven Eckelmann | fa20560 | 2021-06-01 23:50:35 +0200 | [diff] [blame] | 839 | ret = batadv_hardif_enable_interface(hard_iface, dev); |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 840 | |
| 841 | out: |
Sven Eckelmann | 79a0bff | 2021-08-08 19:11:08 +0200 | [diff] [blame] | 842 | batadv_hardif_put(hard_iface); |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 843 | return ret; |
| 844 | } |
| 845 | |
| 846 | /** |
Sven Eckelmann | 7e9a8c2 | 2017-12-02 19:51:47 +0100 | [diff] [blame] | 847 | * batadv_softif_slave_del() - Delete a slave iface from a batadv_soft_interface |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 848 | * @dev: batadv_soft_interface used as master interface |
| 849 | * @slave_dev: net_device which should be removed from the master interface |
| 850 | * |
Sven Eckelmann | 62fe710 | 2015-09-15 19:00:48 +0200 | [diff] [blame] | 851 | * Return: 0 if successful or error otherwise. |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 852 | */ |
| 853 | static int batadv_softif_slave_del(struct net_device *dev, |
| 854 | struct net_device *slave_dev) |
| 855 | { |
| 856 | struct batadv_hard_iface *hard_iface; |
| 857 | int ret = -EINVAL; |
| 858 | |
| 859 | hard_iface = batadv_hardif_get_by_netdev(slave_dev); |
| 860 | |
| 861 | if (!hard_iface || hard_iface->soft_iface != dev) |
| 862 | goto out; |
| 863 | |
Sven Eckelmann | a962cb2 | 2020-08-17 14:37:13 +0200 | [diff] [blame] | 864 | batadv_hardif_disable_interface(hard_iface); |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 865 | ret = 0; |
| 866 | |
| 867 | out: |
Sven Eckelmann | 79a0bff | 2021-08-08 19:11:08 +0200 | [diff] [blame] | 868 | batadv_hardif_put(hard_iface); |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 869 | return ret; |
| 870 | } |
| 871 | |
Sven Eckelmann | 3713029 | 2013-02-11 17:10:22 +0800 | [diff] [blame] | 872 | static const struct net_device_ops batadv_netdev_ops = { |
| 873 | .ndo_init = batadv_softif_init_late, |
| 874 | .ndo_open = batadv_interface_open, |
| 875 | .ndo_stop = batadv_interface_release, |
| 876 | .ndo_get_stats = batadv_interface_stats, |
Antonio Quartulli | 5d2c05b | 2013-07-02 11:04:34 +0200 | [diff] [blame] | 877 | .ndo_vlan_rx_add_vid = batadv_interface_add_vid, |
| 878 | .ndo_vlan_rx_kill_vid = batadv_interface_kill_vid, |
Sven Eckelmann | 3713029 | 2013-02-11 17:10:22 +0800 | [diff] [blame] | 879 | .ndo_set_mac_address = batadv_interface_set_mac_addr, |
| 880 | .ndo_change_mtu = batadv_interface_change_mtu, |
Linus Lüssing | a4deee1a | 2013-05-26 17:56:07 +0200 | [diff] [blame] | 881 | .ndo_set_rx_mode = batadv_interface_set_rx_mode, |
Sven Eckelmann | 3713029 | 2013-02-11 17:10:22 +0800 | [diff] [blame] | 882 | .ndo_start_xmit = batadv_interface_tx, |
Sven Eckelmann | 3dbd550 | 2013-02-11 17:10:27 +0800 | [diff] [blame] | 883 | .ndo_validate_addr = eth_validate_addr, |
| 884 | .ndo_add_slave = batadv_softif_slave_add, |
| 885 | .ndo_del_slave = batadv_softif_slave_del, |
Sven Eckelmann | 3713029 | 2013-02-11 17:10:22 +0800 | [diff] [blame] | 886 | }; |
| 887 | |
Sven Eckelmann | 5405e19 | 2017-04-01 14:47:06 +0200 | [diff] [blame] | 888 | static void batadv_get_drvinfo(struct net_device *dev, |
| 889 | struct ethtool_drvinfo *info) |
| 890 | { |
Sven Eckelmann | 529a8f9 | 2019-07-06 14:56:13 +0200 | [diff] [blame] | 891 | strscpy(info->driver, "B.A.T.M.A.N. advanced", sizeof(info->driver)); |
| 892 | strscpy(info->version, BATADV_SOURCE_VERSION, sizeof(info->version)); |
| 893 | strscpy(info->fw_version, "N/A", sizeof(info->fw_version)); |
| 894 | strscpy(info->bus_info, "batman", sizeof(info->bus_info)); |
Sven Eckelmann | 5405e19 | 2017-04-01 14:47:06 +0200 | [diff] [blame] | 895 | } |
| 896 | |
| 897 | /* Inspired by drivers/net/ethernet/dlink/sundance.c:1702 |
 | 898 | * Declare each description string in struct.name[] to get a fixed-sized buffer |
 | 899 | * and compile-time checking for strings longer than ETH_GSTRING_LEN. |
| 900 | */ |
| 901 | static const struct { |
| 902 | const char name[ETH_GSTRING_LEN]; |
| 903 | } batadv_counters_strings[] = { |
| 904 | { "tx" }, |
| 905 | { "tx_bytes" }, |
| 906 | { "tx_dropped" }, |
| 907 | { "rx" }, |
| 908 | { "rx_bytes" }, |
| 909 | { "forward" }, |
| 910 | { "forward_bytes" }, |
| 911 | { "mgmt_tx" }, |
| 912 | { "mgmt_tx_bytes" }, |
| 913 | { "mgmt_rx" }, |
| 914 | { "mgmt_rx_bytes" }, |
| 915 | { "frag_tx" }, |
| 916 | { "frag_tx_bytes" }, |
| 917 | { "frag_rx" }, |
| 918 | { "frag_rx_bytes" }, |
| 919 | { "frag_fwd" }, |
| 920 | { "frag_fwd_bytes" }, |
| 921 | { "tt_request_tx" }, |
| 922 | { "tt_request_rx" }, |
| 923 | { "tt_response_tx" }, |
| 924 | { "tt_response_rx" }, |
| 925 | { "tt_roam_adv_tx" }, |
| 926 | { "tt_roam_adv_rx" }, |
| 927 | #ifdef CONFIG_BATMAN_ADV_DAT |
| 928 | { "dat_get_tx" }, |
| 929 | { "dat_get_rx" }, |
| 930 | { "dat_put_tx" }, |
| 931 | { "dat_put_rx" }, |
| 932 | { "dat_cached_reply_tx" }, |
| 933 | #endif |
| 934 | #ifdef CONFIG_BATMAN_ADV_NC |
| 935 | { "nc_code" }, |
| 936 | { "nc_code_bytes" }, |
| 937 | { "nc_recode" }, |
| 938 | { "nc_recode_bytes" }, |
| 939 | { "nc_buffer" }, |
| 940 | { "nc_decode" }, |
| 941 | { "nc_decode_bytes" }, |
| 942 | { "nc_decode_failed" }, |
| 943 | { "nc_sniffed" }, |
| 944 | #endif |
| 945 | }; |
| 946 | |
| 947 | static void batadv_get_strings(struct net_device *dev, u32 stringset, u8 *data) |
| 948 | { |
| 949 | if (stringset == ETH_SS_STATS) |
| 950 | memcpy(data, batadv_counters_strings, |
| 951 | sizeof(batadv_counters_strings)); |
| 952 | } |
| 953 | |
| 954 | static void batadv_get_ethtool_stats(struct net_device *dev, |
| 955 | struct ethtool_stats *stats, u64 *data) |
| 956 | { |
| 957 | struct batadv_priv *bat_priv = netdev_priv(dev); |
| 958 | int i; |
| 959 | |
| 960 | for (i = 0; i < BATADV_CNT_NUM; i++) |
| 961 | data[i] = batadv_sum_counter(bat_priv, i); |
| 962 | } |
| 963 | |
| 964 | static int batadv_get_sset_count(struct net_device *dev, int stringset) |
| 965 | { |
| 966 | if (stringset == ETH_SS_STATS) |
| 967 | return BATADV_CNT_NUM; |
| 968 | |
| 969 | return -EOPNOTSUPP; |
| 970 | } |
| 971 | |
| 972 | static const struct ethtool_ops batadv_ethtool_ops = { |
| 973 | .get_drvinfo = batadv_get_drvinfo, |
| 974 | .get_link = ethtool_op_get_link, |
| 975 | .get_strings = batadv_get_strings, |
| 976 | .get_ethtool_stats = batadv_get_ethtool_stats, |
| 977 | .get_sset_count = batadv_get_sset_count, |
| 978 | }; |
| 979 | |
Sven Eckelmann | 3713029 | 2013-02-11 17:10:22 +0800 | [diff] [blame] | 980 | /** |
Sven Eckelmann | 7e9a8c2 | 2017-12-02 19:51:47 +0100 | [diff] [blame] | 981 | * batadv_softif_free() - Deconstructor of batadv_soft_interface |
Sven Eckelmann | b324602 | 2013-02-11 17:10:23 +0800 | [diff] [blame] | 982 | * @dev: Device to cleanup and remove |
| 983 | */ |
| 984 | static void batadv_softif_free(struct net_device *dev) |
| 985 | { |
Sven Eckelmann | b324602 | 2013-02-11 17:10:23 +0800 | [diff] [blame] | 986 | batadv_mesh_free(dev); |
Antonio Quartulli | 0c50134 | 2013-04-19 11:04:52 +0200 | [diff] [blame] | 987 | |
| 988 | /* some scheduled RCU callbacks need the bat_priv struct to accomplish |
| 989 | * their tasks. Wait for them all to be finished before freeing the |
| 990 | * netdev and its private data (bat_priv) |
| 991 | */ |
| 992 | rcu_barrier(); |
Sven Eckelmann | b324602 | 2013-02-11 17:10:23 +0800 | [diff] [blame] | 993 | } |
| 994 | |
| 995 | /** |
Sven Eckelmann | 7e9a8c2 | 2017-12-02 19:51:47 +0100 | [diff] [blame] | 996 | * batadv_softif_init_early() - early stage initialization of soft interface |
Sven Eckelmann | 3713029 | 2013-02-11 17:10:22 +0800 | [diff] [blame] | 997 | * @dev: registered network device to modify |
| 998 | */ |
| 999 | static void batadv_softif_init_early(struct net_device *dev) |
| 1000 | { |
Sven Eckelmann | 3713029 | 2013-02-11 17:10:22 +0800 | [diff] [blame] | 1001 | ether_setup(dev); |
| 1002 | |
| 1003 | dev->netdev_ops = &batadv_netdev_ops; |
David S. Miller | cf124db | 2017-05-08 12:52:56 -0400 | [diff] [blame] | 1004 | dev->needs_free_netdev = true; |
| 1005 | dev->priv_destructor = batadv_softif_free; |
Andrew Lunn | 0d21cda | 2016-03-01 22:19:05 +0100 | [diff] [blame] | 1006 | dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_NETNS_LOCAL; |
Sven Eckelmann | a7ea49a | 2018-09-11 17:22:01 +0200 | [diff] [blame] | 1007 | dev->features |= NETIF_F_LLTX; |
Phil Sutter | cdf7370 | 2015-08-18 10:30:44 +0200 | [diff] [blame] | 1008 | dev->priv_flags |= IFF_NO_QUEUE; |
Sven Eckelmann | 3713029 | 2013-02-11 17:10:22 +0800 | [diff] [blame] | 1009 | |
| 1010 | /* can't call min_mtu, because the needed variables |
| 1011 | * have not been initialized yet |
| 1012 | */ |
| 1013 | dev->mtu = ETH_DATA_LEN; |
Sven Eckelmann | 3713029 | 2013-02-11 17:10:22 +0800 | [diff] [blame] | 1014 | |
| 1015 | /* generate random address */ |
| 1016 | eth_hw_addr_random(dev); |
| 1017 | |
Wilfried Klaebe | 7ad24ea | 2014-05-11 00:12:32 +0000 | [diff] [blame] | 1018 | dev->ethtool_ops = &batadv_ethtool_ops; |
Sven Eckelmann | 3713029 | 2013-02-11 17:10:22 +0800 | [diff] [blame] | 1019 | } |
| 1020 | |
Sven Eckelmann | ff15c27 | 2017-12-02 19:51:53 +0100 | [diff] [blame] | 1021 | /** |
Sven Eckelmann | 128254c | 2020-10-11 12:25:23 +0200 | [diff] [blame] | 1022 | * batadv_softif_validate() - validate configuration of new batadv link |
| 1023 | * @tb: IFLA_INFO_DATA netlink attributes |
| 1024 | * @data: enum batadv_ifla_attrs attributes |
| 1025 | * @extack: extended ACK report struct |
| 1026 | * |
| 1027 | * Return: 0 if successful or error otherwise. |
| 1028 | */ |
| 1029 | static int batadv_softif_validate(struct nlattr *tb[], struct nlattr *data[], |
| 1030 | struct netlink_ext_ack *extack) |
| 1031 | { |
Sven Eckelmann | a5ad457 | 2020-10-11 12:25:24 +0200 | [diff] [blame] | 1032 | struct batadv_algo_ops *algo_ops; |
| 1033 | |
| 1034 | if (!data) |
| 1035 | return 0; |
| 1036 | |
| 1037 | if (data[IFLA_BATADV_ALGO_NAME]) { |
| 1038 | algo_ops = batadv_algo_get(nla_data(data[IFLA_BATADV_ALGO_NAME])); |
| 1039 | if (!algo_ops) |
| 1040 | return -EINVAL; |
| 1041 | } |
| 1042 | |
Sven Eckelmann | 128254c | 2020-10-11 12:25:23 +0200 | [diff] [blame] | 1043 | return 0; |
| 1044 | } |
| 1045 | |
| 1046 | /** |
| 1047 | * batadv_softif_newlink() - pre-initialize and register new batadv link |
| 1048 | * @src_net: the applicable net namespace |
| 1049 | * @dev: network device to register |
| 1050 | * @tb: IFLA_INFO_DATA netlink attributes |
| 1051 | * @data: enum batadv_ifla_attrs attributes |
| 1052 | * @extack: extended ACK report struct |
| 1053 | * |
| 1054 | * Return: 0 if successful or error otherwise. |
| 1055 | */ |
| 1056 | static int batadv_softif_newlink(struct net *src_net, struct net_device *dev, |
| 1057 | struct nlattr *tb[], struct nlattr *data[], |
| 1058 | struct netlink_ext_ack *extack) |
| 1059 | { |
Sven Eckelmann | a5ad457 | 2020-10-11 12:25:24 +0200 | [diff] [blame] | 1060 | struct batadv_priv *bat_priv = netdev_priv(dev); |
| 1061 | const char *algo_name; |
| 1062 | int err; |
| 1063 | |
| 1064 | if (data && data[IFLA_BATADV_ALGO_NAME]) { |
| 1065 | algo_name = nla_data(data[IFLA_BATADV_ALGO_NAME]); |
| 1066 | err = batadv_algo_select(bat_priv, algo_name); |
| 1067 | if (err) |
| 1068 | return -EINVAL; |
| 1069 | } |
| 1070 | |
Sven Eckelmann | 128254c | 2020-10-11 12:25:23 +0200 | [diff] [blame] | 1071 | return register_netdevice(dev); |
| 1072 | } |
| 1073 | |
| 1074 | /** |
Sven Eckelmann | 7e9a8c2 | 2017-12-02 19:51:47 +0100 | [diff] [blame] | 1075 | * batadv_softif_destroy_netlink() - deletion of batadv_soft_interface via |
| 1076 | * netlink |
Sven Eckelmann | a4ac28c | 2013-02-11 17:10:26 +0800 | [diff] [blame] | 1077 | * @soft_iface: the to-be-removed batman-adv interface |
| 1078 | * @head: list pointer |
| 1079 | */ |
| 1080 | static void batadv_softif_destroy_netlink(struct net_device *soft_iface, |
| 1081 | struct list_head *head) |
| 1082 | { |
Sven Eckelmann | 420cb1b | 2016-06-26 11:16:13 +0200 | [diff] [blame] | 1083 | struct batadv_priv *bat_priv = netdev_priv(soft_iface); |
Sven Eckelmann | a4ac28c | 2013-02-11 17:10:26 +0800 | [diff] [blame] | 1084 | struct batadv_hard_iface *hard_iface; |
Sven Eckelmann | 420cb1b | 2016-06-26 11:16:13 +0200 | [diff] [blame] | 1085 | struct batadv_softif_vlan *vlan; |
Sven Eckelmann | a4ac28c | 2013-02-11 17:10:26 +0800 | [diff] [blame] | 1086 | |
| 1087 | list_for_each_entry(hard_iface, &batadv_hardif_list, list) { |
| 1088 | if (hard_iface->soft_iface == soft_iface) |
Sven Eckelmann | a962cb2 | 2020-08-17 14:37:13 +0200 | [diff] [blame] | 1089 | batadv_hardif_disable_interface(hard_iface); |
Sven Eckelmann | a4ac28c | 2013-02-11 17:10:26 +0800 | [diff] [blame] | 1090 | } |
| 1091 | |
Sven Eckelmann | 420cb1b | 2016-06-26 11:16:13 +0200 | [diff] [blame] | 1092 | /* destroy the "untagged" VLAN */ |
| 1093 | vlan = batadv_softif_vlan_get(bat_priv, BATADV_NO_FLAGS); |
| 1094 | if (vlan) { |
| 1095 | batadv_softif_destroy_vlan(bat_priv, vlan); |
| 1096 | batadv_softif_vlan_put(vlan); |
| 1097 | } |
| 1098 | |
Sven Eckelmann | a4ac28c | 2013-02-11 17:10:26 +0800 | [diff] [blame] | 1099 | unregister_netdevice_queue(soft_iface, head); |
| 1100 | } |
| 1101 | |
Sven Eckelmann | ff15c27 | 2017-12-02 19:51:53 +0100 | [diff] [blame] | 1102 | /** |
| 1103 | * batadv_softif_is_valid() - Check whether device is a batadv soft interface |
| 1104 | * @net_dev: device which should be checked |
| 1105 | * |
| 1106 | * Return: true when net_dev is a batman-adv interface, false otherwise |
| 1107 | */ |
Sven Eckelmann | 4b426b1 | 2016-02-22 21:02:39 +0100 | [diff] [blame] | 1108 | bool batadv_softif_is_valid(const struct net_device *net_dev) |
Sven Eckelmann | e44d8fe | 2011-03-04 21:36:41 +0000 | [diff] [blame] | 1109 | { |
Sven Eckelmann | 0294ca0 | 2012-05-16 20:23:15 +0200 | [diff] [blame] | 1110 | if (net_dev->netdev_ops->ndo_start_xmit == batadv_interface_tx) |
Sven Eckelmann | 4b426b1 | 2016-02-22 21:02:39 +0100 | [diff] [blame] | 1111 | return true; |
Sven Eckelmann | e44d8fe | 2011-03-04 21:36:41 +0000 | [diff] [blame] | 1112 | |
Sven Eckelmann | 4b426b1 | 2016-02-22 21:02:39 +0100 | [diff] [blame] | 1113 | return false; |
Sven Eckelmann | e44d8fe | 2011-03-04 21:36:41 +0000 | [diff] [blame] | 1114 | } |
| 1115 | |
Sven Eckelmann | 128254c | 2020-10-11 12:25:23 +0200 | [diff] [blame] | 1116 | static const struct nla_policy batadv_ifla_policy[IFLA_BATADV_MAX + 1] = { |
Sven Eckelmann | a5ad457 | 2020-10-11 12:25:24 +0200 | [diff] [blame] | 1117 | [IFLA_BATADV_ALGO_NAME] = { .type = NLA_NUL_STRING }, |
Sven Eckelmann | 128254c | 2020-10-11 12:25:23 +0200 | [diff] [blame] | 1118 | }; |
| 1119 | |
Sven Eckelmann | a4ac28c | 2013-02-11 17:10:26 +0800 | [diff] [blame] | 1120 | struct rtnl_link_ops batadv_link_ops __read_mostly = { |
| 1121 | .kind = "batadv", |
| 1122 | .priv_size = sizeof(struct batadv_priv), |
| 1123 | .setup = batadv_softif_init_early, |
Sven Eckelmann | 128254c | 2020-10-11 12:25:23 +0200 | [diff] [blame] | 1124 | .maxtype = IFLA_BATADV_MAX, |
| 1125 | .policy = batadv_ifla_policy, |
| 1126 | .validate = batadv_softif_validate, |
| 1127 | .newlink = batadv_softif_newlink, |
Sven Eckelmann | a4ac28c | 2013-02-11 17:10:26 +0800 | [diff] [blame] | 1128 | .dellink = batadv_softif_destroy_netlink, |
| 1129 | }; |
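
The batadv_link_ops registration above is what lets userspace create batadv soft-interfaces over rtnetlink: with a sufficiently recent iproute2 something like "ip link add bat0 type batadv" sends an RTM_NEWLINK request whose IFLA_INFO_KIND is "batadv", which the kernel routes into batadv_softif_validate() and batadv_softif_newlink(). The userspace sketch below is not part of this file; it is a minimal illustration, assuming a kernel that exposes this interface, of how such a request is built by hand, including the optional IFLA_BATADV_ALGO_NAME attribute accepted by batadv_ifla_policy. The helper names (put_attr, nest_begin, nest_end), the interface name "bat0" and the reduced error handling are illustrative only.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if_link.h>
#include <linux/batman_adv.h>

/* append one attribute at the tail of the netlink message (iproute2 style) */
static void put_attr(struct nlmsghdr *nlh, unsigned short type,
		     const void *data, unsigned short len)
{
	struct rtattr *rta;

	rta = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));
	rta->rta_type = type;
	rta->rta_len = RTA_LENGTH(len);
	if (data && len)
		memcpy(RTA_DATA(rta), data, len);
	nlh->nlmsg_len = NLMSG_ALIGN(nlh->nlmsg_len) + RTA_ALIGN(rta->rta_len);
}

/* open a nested attribute; its length is fixed up later by nest_end() */
static struct rtattr *nest_begin(struct nlmsghdr *nlh, unsigned short type)
{
	struct rtattr *nest;

	nest = (struct rtattr *)((char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len));
	put_attr(nlh, type, NULL, 0);
	return nest;
}

static void nest_end(struct nlmsghdr *nlh, struct rtattr *nest)
{
	nest->rta_len = (char *)nlh + NLMSG_ALIGN(nlh->nlmsg_len) - (char *)nest;
}

int main(void)
{
	struct {
		struct nlmsghdr nlh;
		struct ifinfomsg ifi;
		char attrs[512];
	} req;
	struct sockaddr_nl kernel = { .nl_family = AF_NETLINK };
	struct rtattr *linkinfo, *infodata;
	int fd, ret = 1;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(req.ifi));
	req.nlh.nlmsg_type = RTM_NEWLINK;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_CREATE | NLM_F_EXCL;
	req.ifi.ifi_family = AF_UNSPEC;

	/* name of the new soft-interface (example value) */
	put_attr(&req.nlh, IFLA_IFNAME, "bat0", sizeof("bat0"));

	/* IFLA_LINKINFO { IFLA_INFO_KIND, IFLA_INFO_DATA { IFLA_BATADV_ALGO_NAME } } */
	linkinfo = nest_begin(&req.nlh, IFLA_LINKINFO);
	put_attr(&req.nlh, IFLA_INFO_KIND, "batadv", sizeof("batadv"));
	infodata = nest_begin(&req.nlh, IFLA_INFO_DATA);
	put_attr(&req.nlh, IFLA_BATADV_ALGO_NAME, "BATMAN_IV", sizeof("BATMAN_IV"));
	nest_end(&req.nlh, infodata);
	nest_end(&req.nlh, linkinfo);

	fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (fd < 0)
		return 1;

	/* needs CAP_NET_ADMIN; a complete tool would also read the kernel's
	 * netlink ack/error reply instead of ignoring it
	 */
	if (sendto(fd, &req, req.nlh.nlmsg_len, 0,
		   (struct sockaddr *)&kernel, sizeof(kernel)) >= 0)
		ret = 0;

	close(fd);
	return ret;
}

Enslaving a hard interface afterwards ("ip link set dev eth0 master bat0") takes the analogous path through .ndo_add_slave, i.e. batadv_softif_slave_add() above, and removal via "ip link set dev eth0 nomaster" ends up in batadv_softif_slave_del().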