// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 */

#include "main.h"

#include <linux/atomic.h>
#include <linux/build_bug.h>
#include <linux/byteorder/generic.h>
#include <linux/crc32c.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/genetlink.h>
#include <linux/gfp.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/init.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/printk.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <net/dsfield.h>
#include <net/rtnetlink.h>
#include <uapi/linux/batadv_packet.h>
#include <uapi/linux/batman_adv.h>

#include "bat_algo.h"
#include "bat_iv_ogm.h"
#include "bat_v.h"
#include "bridge_loop_avoidance.h"
#include "distributed-arp-table.h"
#include "gateway_client.h"
#include "gateway_common.h"
#include "hard-interface.h"
#include "log.h"
#include "multicast.h"
#include "netlink.h"
#include "network-coding.h"
#include "originator.h"
#include "routing.h"
#include "send.h"
#include "soft-interface.h"
#include "tp_meter.h"
#include "translation-table.h"

/* List manipulations on hardif_list have to be rtnl_lock()'ed,
 * list traversals just rcu-locked
 */
struct list_head batadv_hardif_list;
unsigned int batadv_hardif_generation;
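
/* Table of receive handlers, one slot per batman-adv packet type byte.
 * batadv_recv_handler_init() fills every slot with a fallback handler;
 * protocol code installs real handlers via batadv_recv_handler_register().
 */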
static int (*batadv_rx_handler[256])(struct sk_buff *skb,
				     struct batadv_hard_iface *recv_if);

unsigned char batadv_broadcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};

struct workqueue_struct *batadv_event_workqueue;

static void batadv_recv_handler_init(void);

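/* Environment variable prefixes and string tables used by
 * batadv_throw_uevent() when assembling the uevent sent to userspace.
 */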
#define BATADV_UEV_TYPE_VAR	"BATTYPE="
#define BATADV_UEV_ACTION_VAR	"BATACTION="
#define BATADV_UEV_DATA_VAR	"BATDATA="

static char *batadv_uev_action_str[] = {
	"add",
	"del",
	"change",
	"loopdetect",
};

static char *batadv_uev_type_str[] = {
	"gw",
	"bla",
};

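/* Module init: set up the translation table cache, the routing algorithms,
 * the receive handler table and the event workqueue, then register the
 * netdevice notifier, the rtnl link operations and the generic netlink
 * family.
 */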
static int __init batadv_init(void)
{
	int ret;

	ret = batadv_tt_cache_init();
	if (ret < 0)
		return ret;

	INIT_LIST_HEAD(&batadv_hardif_list);
	batadv_algo_init();

	batadv_recv_handler_init();

	batadv_v_init();
	batadv_iv_init();
	batadv_nc_init();
	batadv_tp_meter_init();

	batadv_event_workqueue = create_singlethread_workqueue("bat_events");
	if (!batadv_event_workqueue)
		goto err_create_wq;

	register_netdevice_notifier(&batadv_hard_if_notifier);
	rtnl_link_register(&batadv_link_ops);
	batadv_netlink_register();

	pr_info("B.A.T.M.A.N. advanced %s (compatibility version %i) loaded\n",
		BATADV_SOURCE_VERSION, BATADV_COMPAT_VERSION);

	return 0;

err_create_wq:
	batadv_tt_cache_destroy();

	return -ENOMEM;
}

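/* Module exit: undo the registrations from batadv_init() in reverse order,
 * drain and destroy the event workqueue and wait for outstanding RCU
 * callbacks (rcu_barrier()) before destroying the translation table cache.
 */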
static void __exit batadv_exit(void)
{
	batadv_netlink_unregister();
	rtnl_link_unregister(&batadv_link_ops);
	unregister_netdevice_notifier(&batadv_hard_if_notifier);

	flush_workqueue(batadv_event_workqueue);
	destroy_workqueue(batadv_event_workqueue);
	batadv_event_workqueue = NULL;

	rcu_barrier();

	batadv_tt_cache_destroy();
}

/**
 * batadv_mesh_init() - Initialize soft interface
 * @soft_iface: netdev struct of the soft interface
 *
 * Return: 0 on success or negative error number in case of failure
 */
int batadv_mesh_init(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);
	int ret;

	spin_lock_init(&bat_priv->forw_bat_list_lock);
	spin_lock_init(&bat_priv->forw_bcast_list_lock);
	spin_lock_init(&bat_priv->tt.changes_list_lock);
	spin_lock_init(&bat_priv->tt.req_list_lock);
	spin_lock_init(&bat_priv->tt.roam_list_lock);
	spin_lock_init(&bat_priv->tt.last_changeset_lock);
	spin_lock_init(&bat_priv->tt.commit_lock);
	spin_lock_init(&bat_priv->gw.list_lock);
#ifdef CONFIG_BATMAN_ADV_MCAST
	spin_lock_init(&bat_priv->mcast.mla_lock);
	spin_lock_init(&bat_priv->mcast.want_lists_lock);
#endif
	spin_lock_init(&bat_priv->tvlv.container_list_lock);
	spin_lock_init(&bat_priv->tvlv.handler_list_lock);
	spin_lock_init(&bat_priv->softif_vlan_list_lock);
	spin_lock_init(&bat_priv->tp_list_lock);

	INIT_HLIST_HEAD(&bat_priv->forw_bat_list);
	INIT_HLIST_HEAD(&bat_priv->forw_bcast_list);
	INIT_HLIST_HEAD(&bat_priv->gw.gateway_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_unsnoopables_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv4_list);
	INIT_HLIST_HEAD(&bat_priv->mcast.want_all_ipv6_list);
#endif
	INIT_LIST_HEAD(&bat_priv->tt.changes_list);
	INIT_HLIST_HEAD(&bat_priv->tt.req_list);
	INIT_LIST_HEAD(&bat_priv->tt.roam_list);
#ifdef CONFIG_BATMAN_ADV_MCAST
	INIT_HLIST_HEAD(&bat_priv->mcast.mla_list);
#endif
	INIT_HLIST_HEAD(&bat_priv->tvlv.container_list);
	INIT_HLIST_HEAD(&bat_priv->tvlv.handler_list);
	INIT_HLIST_HEAD(&bat_priv->softif_vlan_list);
	INIT_HLIST_HEAD(&bat_priv->tp_list);

	bat_priv->gw.generation = 0;

	ret = batadv_v_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_tt_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_bla_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_dat_init(bat_priv);
	if (ret < 0)
		goto err;

	ret = batadv_nc_mesh_init(bat_priv);
	if (ret < 0)
		goto err;

	batadv_gw_init(bat_priv);
	batadv_mcast_init(bat_priv);

	atomic_set(&bat_priv->gw.reselect, 0);
	atomic_set(&bat_priv->mesh_state, BATADV_MESH_ACTIVE);

	return 0;

err:
	batadv_mesh_free(soft_iface);
	return ret;
}

/**
 * batadv_mesh_free() - Deinitialize soft interface
 * @soft_iface: netdev struct of the soft interface
 */
void batadv_mesh_free(struct net_device *soft_iface)
{
	struct batadv_priv *bat_priv = netdev_priv(soft_iface);

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_DEACTIVATING);

	batadv_purge_outstanding_packets(bat_priv, NULL);

	batadv_gw_node_free(bat_priv);

	batadv_v_mesh_free(bat_priv);
	batadv_nc_mesh_free(bat_priv);
	batadv_dat_free(bat_priv);
	batadv_bla_free(bat_priv);

	batadv_mcast_free(bat_priv);

	/* Free the TT and the originator tables only after having terminated
	 * all the other depending components which may use these structures for
	 * their purposes.
	 */
	batadv_tt_free(bat_priv);

	/* Since the originator table clean up routine is accessing the TT
	 * tables as well, it has to be invoked after the TT tables have been
	 * freed and marked as empty. This ensures that no cleanup RCU callbacks
	 * accessing the TT data are scheduled for later execution.
	 */
	batadv_originator_free(bat_priv);

	batadv_gw_free(bat_priv);

	free_percpu(bat_priv->bat_counters);
	bat_priv->bat_counters = NULL;

	atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
}

/**
 * batadv_is_my_mac() - check if the given mac address belongs to any of the
 *  real interfaces in the current mesh
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address to check
 *
 * Return: true if the mac address was found, false otherwise.
 */
bool batadv_is_my_mac(struct batadv_priv *bat_priv, const u8 *addr)
{
	const struct batadv_hard_iface *hard_iface;
	bool is_my_mac = false;

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {
			is_my_mac = true;
			break;
		}
	}
	rcu_read_unlock();
	return is_my_mac;
}

/**
 * batadv_max_header_len() - calculate maximum encapsulation overhead for a
 *  payload packet
 *
 * Return: the maximum encapsulation overhead in bytes.
 */
int batadv_max_header_len(void)
{
	int header_len = 0;

	header_len = max_t(int, header_len,
			   sizeof(struct batadv_unicast_packet));
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_unicast_4addr_packet));
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_bcast_packet));

#ifdef CONFIG_BATMAN_ADV_NC
	header_len = max_t(int, header_len,
			   sizeof(struct batadv_coded_packet));
#endif

	return header_len + ETH_HLEN;
}

/**
 * batadv_skb_set_priority() - sets skb priority according to packet content
 * @skb: the packet to be sent
 * @offset: offset to the packet content
 *
 * This function sets a value between 256 and 263 (802.1d priority), which
 * can be interpreted by the cfg80211 or other drivers.
 */
void batadv_skb_set_priority(struct sk_buff *skb, int offset)
{
	struct iphdr ip_hdr_tmp, *ip_hdr;
	struct ipv6hdr ip6_hdr_tmp, *ip6_hdr;
	struct ethhdr ethhdr_tmp, *ethhdr;
	struct vlan_ethhdr *vhdr, vhdr_tmp;
	u32 prio;

	/* already set, do nothing */
	if (skb->priority >= 256 && skb->priority <= 263)
		return;

	ethhdr = skb_header_pointer(skb, offset, sizeof(*ethhdr), &ethhdr_tmp);
	if (!ethhdr)
		return;

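	/* Derive the priority from the VLAN PCP bits or from the top three
	 * bits of the IP/IPv6 DS field (precedence); the result is moved
	 * into the 256-263 range by the "+ 256" below.
	 */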
	switch (ethhdr->h_proto) {
	case htons(ETH_P_8021Q):
		vhdr = skb_header_pointer(skb, offset + sizeof(*vhdr),
					  sizeof(*vhdr), &vhdr_tmp);
		if (!vhdr)
			return;
		prio = ntohs(vhdr->h_vlan_TCI) & VLAN_PRIO_MASK;
		prio = prio >> VLAN_PRIO_SHIFT;
		break;
	case htons(ETH_P_IP):
		ip_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					    sizeof(*ip_hdr), &ip_hdr_tmp);
		if (!ip_hdr)
			return;
		prio = (ipv4_get_dsfield(ip_hdr) & 0xfc) >> 5;
		break;
	case htons(ETH_P_IPV6):
		ip6_hdr = skb_header_pointer(skb, offset + sizeof(*ethhdr),
					     sizeof(*ip6_hdr), &ip6_hdr_tmp);
		if (!ip6_hdr)
			return;
		prio = (ipv6_get_dsfield(ip6_hdr) & 0xfc) >> 5;
		break;
	default:
		return;
	}

	skb->priority = prio + 256;
}

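/* Fallback receive handler for packet types without a registered handler:
 * the skb is freed and the frame is reported as dropped.
 */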
static int batadv_recv_unhandled_packet(struct sk_buff *skb,
					struct batadv_hard_iface *recv_if)
{
	kfree_skb(skb);

	return NET_RX_DROP;
}

/* incoming packets with the batman ethertype received on any active hard
 * interface
 */

/**
 * batadv_batman_skb_recv() - Handle incoming message from a hard interface
 * @skb: the received packet
 * @dev: the net device that the packet was received on
 * @ptype: packet type of incoming packet (ETH_P_BATMAN)
 * @orig_dev: the original receive net device (e.g. bonded device)
 *
 * Return: NET_RX_SUCCESS on success or NET_RX_DROP in case of failure
 */
int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *ptype,
			   struct net_device *orig_dev)
{
	struct batadv_priv *bat_priv;
	struct batadv_ogm_packet *batadv_ogm_packet;
	struct batadv_hard_iface *hard_iface;
	u8 idx;

	hard_iface = container_of(ptype, struct batadv_hard_iface,
				  batman_adv_ptype);

	/* Prevent processing a packet received on an interface which is getting
	 * shut down otherwise the packet may trigger de-reference errors
	 * further down in the receive path.
	 */
	if (!kref_get_unless_zero(&hard_iface->refcount))
		goto err_out;

	skb = skb_share_check(skb, GFP_ATOMIC);

	/* skb was released by skb_share_check() */
	if (!skb)
		goto err_put;

	/* packet should hold at least type and version */
	if (unlikely(!pskb_may_pull(skb, 2)))
		goto err_free;

	/* expect a valid ethernet header here. */
	if (unlikely(skb->mac_len != ETH_HLEN || !skb_mac_header(skb)))
		goto err_free;

	if (!hard_iface->soft_iface)
		goto err_free;

	bat_priv = netdev_priv(hard_iface->soft_iface);

	if (atomic_read(&bat_priv->mesh_state) != BATADV_MESH_ACTIVE)
		goto err_free;

	/* discard frames on not active interfaces */
	if (hard_iface->if_status != BATADV_IF_ACTIVE)
		goto err_free;

	batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;

	if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Drop packet: incompatible batman version (%i)\n",
			   batadv_ogm_packet->version);
		goto err_free;
	}

	/* reset control block to avoid left overs from previous users */
	memset(skb->cb, 0, sizeof(struct batadv_skb_cb));

	idx = batadv_ogm_packet->packet_type;
	(*batadv_rx_handler[idx])(skb, hard_iface);

	batadv_hardif_put(hard_iface);

	/* return NET_RX_SUCCESS in any case as we
	 * most probably dropped the packet for
	 * routing-logical reasons.
	 */
	return NET_RX_SUCCESS;

err_free:
	kfree_skb(skb);
err_put:
	batadv_hardif_put(hard_iface);
err_out:
	return NET_RX_DROP;
}

static void batadv_recv_handler_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(batadv_rx_handler); i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_packet;

	for (i = BATADV_UNICAST_MIN; i <= BATADV_UNICAST_MAX; i++)
		batadv_rx_handler[i] = batadv_recv_unhandled_unicast_packet;

	/* compile time checks for sizes */
	BUILD_BUG_ON(sizeof(struct batadv_bla_claim_dst) != 6);
	BUILD_BUG_ON(sizeof(struct batadv_ogm_packet) != 24);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_header) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_icmp_packet_rr) != 116);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_packet) != 10);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_4addr_packet) != 18);
	BUILD_BUG_ON(sizeof(struct batadv_frag_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_bcast_packet) != 14);
	BUILD_BUG_ON(sizeof(struct batadv_coded_packet) != 46);
	BUILD_BUG_ON(sizeof(struct batadv_unicast_tvlv_packet) != 20);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_hdr) != 4);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_gateway_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_vlan_data) != 8);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_tt_change) != 12);
	BUILD_BUG_ON(sizeof(struct batadv_tvlv_roam_adv) != 8);

	i = sizeof_field(struct sk_buff, cb);
	BUILD_BUG_ON(sizeof(struct batadv_skb_cb) > i);

	/* broadcast packet */
	batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;

	/* unicast packets ... */
	/* unicast with 4 addresses packet */
	batadv_rx_handler[BATADV_UNICAST_4ADDR] = batadv_recv_unicast_packet;
	/* unicast packet */
	batadv_rx_handler[BATADV_UNICAST] = batadv_recv_unicast_packet;
	/* unicast tvlv packet */
	batadv_rx_handler[BATADV_UNICAST_TVLV] = batadv_recv_unicast_tvlv;
	/* batman icmp packet */
	batadv_rx_handler[BATADV_ICMP] = batadv_recv_icmp_packet;
	/* Fragmented packets */
	batadv_rx_handler[BATADV_UNICAST_FRAG] = batadv_recv_frag_packet;
}

/**
 * batadv_recv_handler_register() - Register handler for batman-adv packet type
 * @packet_type: batadv_packettype which should be handled
 * @recv_handler: receive handler for the packet type
 *
 * Return: 0 on success or negative error number in case of failure
 */
int
batadv_recv_handler_register(u8 packet_type,
			     int (*recv_handler)(struct sk_buff *,
						 struct batadv_hard_iface *))
{
	int (*curr)(struct sk_buff *skb,
		    struct batadv_hard_iface *recv_if);
	curr = batadv_rx_handler[packet_type];

	if (curr != batadv_recv_unhandled_packet &&
	    curr != batadv_recv_unhandled_unicast_packet)
		return -EBUSY;

	batadv_rx_handler[packet_type] = recv_handler;
	return 0;
}
548
Sven Eckelmannff15c272017-12-02 19:51:53 +0100549/**
550 * batadv_recv_handler_unregister() - Unregister handler for packet type
551 * @packet_type: batadv_packettype which should no longer be handled
552 */
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200553void batadv_recv_handler_unregister(u8 packet_type)
Marek Lindnerffa995e2012-03-01 15:35:17 +0800554{
Sven Eckelmannee11ad62012-05-16 20:23:19 +0200555 batadv_rx_handler[packet_type] = batadv_recv_unhandled_packet;
Marek Lindnerffa995e2012-03-01 15:35:17 +0800556}
557
Sven Eckelmann95a066d2012-10-17 21:10:39 +0200558/**
Sven Eckelmann7e9a8c22017-12-02 19:51:47 +0100559 * batadv_skb_crc32() - calculate CRC32 of the whole packet and skip bytes in
Sven Eckelmann95a066d2012-10-17 21:10:39 +0200560 * the header
561 * @skb: skb pointing to fragmented socket buffers
562 * @payload_ptr: Pointer to position inside the head buffer of the skb
563 * marking the start of the data to be CRC'ed
564 *
565 * payload_ptr must always point to an address in the skb head buffer and not to
566 * a fragment.
Sven Eckelmann7afcbbe2015-10-31 12:29:29 +0100567 *
568 * Return: big endian crc32c of the checksummed data
Sven Eckelmann95a066d2012-10-17 21:10:39 +0200569 */
570__be32 batadv_skb_crc32(struct sk_buff *skb, u8 *payload_ptr)
571{
572 u32 crc = 0;
573 unsigned int from;
574 unsigned int to = skb->len;
575 struct skb_seq_state st;
576 const u8 *data;
577 unsigned int len;
578 unsigned int consumed = 0;
579
580 from = (unsigned int)(payload_ptr - skb->data);
581
582 skb_prepare_seq_read(skb, from, to, &st);
583 while ((len = skb_seq_read(consumed, &data, &st)) != 0) {
584 crc = crc32c(crc, data, len);
585 consumed += len;
586 }
Sven Eckelmann95a066d2012-10-17 21:10:39 +0200587
588 return htonl(crc);
589}
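
/* Illustrative sketch (not part of this file): to checksum everything that
 * follows a header of hdr_len bytes (hdr_len is a made-up name here), one
 * could call roughly
 *
 *	crc = batadv_skb_crc32(skb, skb->data + hdr_len);
 *
 * provided skb->data + hdr_len still points into the skb head buffer.
 */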

/**
 * batadv_get_vid() - extract the VLAN identifier from skb if any
 * @skb: the buffer containing the packet
 * @header_len: length of the batman header preceding the ethernet header
 *
 * Return: VID with the BATADV_VLAN_HAS_TAG flag when the packet embedded in
 * the skb is vlan tagged. Otherwise BATADV_NO_FLAGS.
 */
unsigned short batadv_get_vid(struct sk_buff *skb, size_t header_len)
{
	struct ethhdr *ethhdr = (struct ethhdr *)(skb->data + header_len);
	struct vlan_ethhdr *vhdr;
	unsigned short vid;

	if (ethhdr->h_proto != htons(ETH_P_8021Q))
		return BATADV_NO_FLAGS;

	if (!pskb_may_pull(skb, header_len + VLAN_ETH_HLEN))
		return BATADV_NO_FLAGS;

	vhdr = (struct vlan_ethhdr *)(skb->data + header_len);
	vid = ntohs(vhdr->h_vlan_TCI) & VLAN_VID_MASK;
	vid |= BATADV_VLAN_HAS_TAG;

	return vid;
}
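
/* Illustrative sketch (not part of this file): when the encapsulated
 * ethernet frame starts hdr_size bytes into the skb (hdr_size is a made-up
 * name here), the VLAN lookup would be roughly
 *
 *	vid = batadv_get_vid(skb, hdr_size);
 */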

/**
 * batadv_vlan_ap_isola_get() - return AP isolation status for the given vlan
 * @bat_priv: the bat priv with all the soft interface information
 * @vid: the VLAN identifier for which the AP isolation attribute has to be
 *  looked up
 *
 * Return: true if AP isolation is on for the VLAN identified by vid, false
 * otherwise
 */
bool batadv_vlan_ap_isola_get(struct batadv_priv *bat_priv, unsigned short vid)
{
	bool ap_isolation_enabled = false;
	struct batadv_softif_vlan *vlan;

	/* if the AP isolation is requested on a VLAN, then check for its
	 * setting in the proper VLAN private data structure
	 */
	vlan = batadv_softif_vlan_get(bat_priv, vid);
	if (vlan) {
		ap_isolation_enabled = atomic_read(&vlan->ap_isolation);
		batadv_softif_vlan_put(vlan);
	}

	return ap_isolation_enabled;
}

/**
 * batadv_throw_uevent() - Send an uevent with batman-adv specific env data
 * @bat_priv: the bat priv with all the soft interface information
 * @type: subsystem type of event. Stored in uevent's BATTYPE
 * @action: action type of event. Stored in uevent's BATACTION
 * @data: string with additional information to the event (ignored for
 *  BATADV_UEV_DEL). Stored in uevent's BATDATA
 *
 * Return: 0 on success or negative error number in case of failure
 */
int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
			enum batadv_uev_action action, const char *data)
{
	int ret = -ENOMEM;
	struct kobject *bat_kobj;
	char *uevent_env[4] = { NULL, NULL, NULL, NULL };

	bat_kobj = &bat_priv->soft_iface->dev.kobj;

	uevent_env[0] = kasprintf(GFP_ATOMIC,
				  "%s%s", BATADV_UEV_TYPE_VAR,
				  batadv_uev_type_str[type]);
	if (!uevent_env[0])
		goto out;

	uevent_env[1] = kasprintf(GFP_ATOMIC,
				  "%s%s", BATADV_UEV_ACTION_VAR,
				  batadv_uev_action_str[action]);
	if (!uevent_env[1])
		goto out;

	/* If the event is DEL, ignore the data field */
	if (action != BATADV_UEV_DEL) {
		uevent_env[2] = kasprintf(GFP_ATOMIC,
					  "%s%s", BATADV_UEV_DATA_VAR, data);
		if (!uevent_env[2])
			goto out;
	}

	ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
out:
	kfree(uevent_env[0]);
	kfree(uevent_env[1]);
	kfree(uevent_env[2]);

	if (ret)
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Impossible to send uevent for (%s,%s,%s) event (err: %d)\n",
			   batadv_uev_type_str[type],
			   batadv_uev_action_str[action],
			   (action == BATADV_UEV_DEL ? "NULL" : data), ret);
	return ret;
}
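
/* Illustrative sketch (not part of this file): the gateway code could, for
 * example, announce a newly selected gateway roughly like
 *
 *	batadv_throw_uevent(bat_priv, BATADV_UEV_GW, BATADV_UEV_ADD,
 *			    gw_addr_str);
 *
 * where gw_addr_str is a made-up name for the string stored in BATDATA.
 */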

module_init(batadv_init);
module_exit(batadv_exit);

MODULE_LICENSE("GPL");

MODULE_AUTHOR(BATADV_DRIVER_AUTHOR);
MODULE_DESCRIPTION(BATADV_DRIVER_DESC);
MODULE_VERSION(BATADV_SOURCE_VERSION);
MODULE_ALIAS_RTNL_LINK("batadv");
MODULE_ALIAS_GENL_FAMILY(BATADV_NL_NAME);