Sven Eckelmann0046b042016-01-01 00:01:03 +01001/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00002 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
Antonio Quartulliebf38fb2013-11-03 20:40:48 +010015 * along with this program; if not, see <http://www.gnu.org/licenses/>.
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000016 */
17
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000018#include "originator.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020019#include "main.h"
20
21#include <linux/errno.h>
22#include <linux/etherdevice.h>
23#include <linux/fs.h>
24#include <linux/jiffies.h>
25#include <linux/kernel.h>
Sven Eckelmann90f564d2016-01-16 10:29:40 +010026#include <linux/kref.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020027#include <linux/list.h>
28#include <linux/lockdep.h>
29#include <linux/netdevice.h>
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080030#include <linux/rculist.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020031#include <linux/seq_file.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/workqueue.h>
35
36#include "distributed-arp-table.h"
37#include "fragmentation.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000038#include "gateway_client.h"
39#include "hard-interface.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020040#include "hash.h"
Linus Lüssing60432d72014-02-15 17:47:51 +010041#include "multicast.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020042#include "network-coding.h"
43#include "routing.h"
44#include "translation-table.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000045
Antonio Quartullidec05072012-11-10 11:00:32 +010046/* hash class keys */
47static struct lock_class_key batadv_orig_hash_lock_class_key;
48
Sven Eckelmann03fc7f82012-05-12 18:34:00 +020049static void batadv_purge_orig(struct work_struct *work);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000050
Sven Eckelmann62fe7102015-09-15 19:00:48 +020051/**
Sven Eckelmann7afcbbe2015-10-31 12:29:29 +010052 * batadv_compare_orig - comparing function used in the originator hash table
53 * @node: node in the local table
54 * @data2: second object to compare the node to
Sven Eckelmann62fe7102015-09-15 19:00:48 +020055 *
56 * Return: 1 if they are the same originator
57 */
Antonio Quartullibbad0a52013-09-02 12:15:02 +020058int batadv_compare_orig(const struct hlist_node *node, const void *data2)
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020059{
Sven Eckelmann56303d32012-06-05 22:31:31 +020060 const void *data1 = container_of(node, struct batadv_orig_node,
61 hash_entry);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020062
dingtianhong323813e2013-12-26 19:40:39 +080063 return batadv_compare_eth(data1, data2);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020064}
65
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020066/**
67 * batadv_orig_node_vlan_get - get an orig_node_vlan object
68 * @orig_node: the originator serving the VLAN
69 * @vid: the VLAN identifier
70 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +020071 * Return: the vlan object identified by vid and belonging to orig_node or NULL
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020072 * if it does not exist.
73 */
74struct batadv_orig_node_vlan *
75batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
76 unsigned short vid)
77{
78 struct batadv_orig_node_vlan *vlan = NULL, *tmp;
79
80 rcu_read_lock();
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080081 hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020082 if (tmp->vid != vid)
83 continue;
84
Sven Eckelmann161a3be2016-01-16 10:29:55 +010085 if (!kref_get_unless_zero(&tmp->refcount))
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020086 continue;
87
88 vlan = tmp;
89
90 break;
91 }
92 rcu_read_unlock();
93
94 return vlan;
95}
96
97/**
98 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
99 * object
100 * @orig_node: the originator serving the VLAN
101 * @vid: the VLAN identifier
102 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200103 * Return: NULL in case of failure or the vlan object identified by vid and
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200104 * belonging to orig_node otherwise. The object is created and added to the list
105 * if it does not exist.
106 *
107 * The object is returned with refcounter increased by 1.
108 */
109struct batadv_orig_node_vlan *
110batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
111 unsigned short vid)
112{
113 struct batadv_orig_node_vlan *vlan;
114
115 spin_lock_bh(&orig_node->vlan_list_lock);
116
117 /* first look if an object for this vid already exists */
118 vlan = batadv_orig_node_vlan_get(orig_node, vid);
119 if (vlan)
120 goto out;
121
122 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
123 if (!vlan)
124 goto out;
125
Sven Eckelmann161a3be2016-01-16 10:29:55 +0100126 kref_init(&vlan->refcount);
127 kref_get(&vlan->refcount);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200128 vlan->vid = vid;
129
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800130 hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200131
132out:
133 spin_unlock_bh(&orig_node->vlan_list_lock);
134
135 return vlan;
136}
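/* Usage sketch (hypothetical caller, assuming an orig_node and vid are at
 * hand): both vlan helpers above hand back the object with its refcount
 * already raised by one, so the lookup is expected to be paired with
 * batadv_orig_node_vlan_free_ref() once the caller is done:
 *
 *	struct batadv_orig_node_vlan *vlan;
 *
 *	vlan = batadv_orig_node_vlan_new(orig_node, vid);
 *	if (!vlan)
 *		return -ENOMEM;
 *
 *	(work with vlan->vid and the per-vlan data here)
 *
 *	batadv_orig_node_vlan_free_ref(vlan);
 */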
137
138/**
Sven Eckelmann161a3be2016-01-16 10:29:55 +0100139 * batadv_orig_node_vlan_release - release originator-vlan object from lists
140 * and queue for free after rcu grace period
141 * @ref: kref pointer of the originator-vlan object
142 */
143static void batadv_orig_node_vlan_release(struct kref *ref)
144{
145 struct batadv_orig_node_vlan *orig_vlan;
146
147 orig_vlan = container_of(ref, struct batadv_orig_node_vlan, refcount);
148
149 kfree_rcu(orig_vlan, rcu);
150}
151
152/**
153 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly
154 * release the originator-vlan object
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200155 * @orig_vlan: the originator-vlan object to release
156 */
157void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
158{
Sven Eckelmann161a3be2016-01-16 10:29:55 +0100159 kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200160}
161
Sven Eckelmann56303d32012-06-05 22:31:31 +0200162int batadv_originator_init(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000163{
164 if (bat_priv->orig_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200165 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000166
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200167 bat_priv->orig_hash = batadv_hash_new(1024);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000168
169 if (!bat_priv->orig_hash)
170 goto err;
171
Antonio Quartullidec05072012-11-10 11:00:32 +0100172 batadv_hash_set_lock_class(bat_priv->orig_hash,
173 &batadv_orig_hash_lock_class_key);
174
Antonio Quartulli72414442012-12-25 13:14:37 +0100175 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
176 queue_delayed_work(batadv_event_workqueue,
177 &bat_priv->orig_work,
178 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
179
Sven Eckelmann5346c352012-05-05 13:27:28 +0200180 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000181
182err:
Sven Eckelmann5346c352012-05-05 13:27:28 +0200183 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000184}
185
Simon Wunderlich89652332013-11-13 19:14:46 +0100186/**
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100187 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
188 * free after rcu grace period
Sven Eckelmann962c6832016-01-16 10:29:51 +0100189 * @ref: kref pointer of the neigh_ifinfo
Simon Wunderlich89652332013-11-13 19:14:46 +0100190 */
Sven Eckelmann962c6832016-01-16 10:29:51 +0100191static void batadv_neigh_ifinfo_release(struct kref *ref)
Simon Wunderlich89652332013-11-13 19:14:46 +0100192{
Sven Eckelmann962c6832016-01-16 10:29:51 +0100193 struct batadv_neigh_ifinfo *neigh_ifinfo;
194
195 neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount);
196
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100197 if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
198 batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
199
200 kfree_rcu(neigh_ifinfo, rcu);
Simon Wunderlich89652332013-11-13 19:14:46 +0100201}
202
203/**
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100204 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
Simon Wunderlich89652332013-11-13 19:14:46 +0100205 * the neigh_ifinfo
206 * @neigh_ifinfo: the neigh_ifinfo object to release
207 */
208void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
209{
Sven Eckelmann962c6832016-01-16 10:29:51 +0100210 kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release);
Simon Wunderlich89652332013-11-13 19:14:46 +0100211}
212
213/**
Sven Eckelmannf6389692016-01-05 12:06:23 +0100214 * batadv_hardif_neigh_release - release hardif neigh node from lists and
215 * queue for free after rcu grace period
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100216 * @ref: kref pointer of the neigh_node
Marek Lindnercef63412015-08-04 21:09:55 +0800217 */
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100218static void batadv_hardif_neigh_release(struct kref *ref)
Marek Lindnercef63412015-08-04 21:09:55 +0800219{
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100220 struct batadv_hardif_neigh_node *hardif_neigh;
221
222 hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node,
223 refcount);
224
Sven Eckelmannf6389692016-01-05 12:06:23 +0100225 spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
226 hlist_del_init_rcu(&hardif_neigh->list);
227 spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
Sven Eckelmannbab7c6c2016-01-05 12:06:17 +0100228
Sven Eckelmannf6389692016-01-05 12:06:23 +0100229 batadv_hardif_free_ref(hardif_neigh->if_incoming);
230 kfree_rcu(hardif_neigh, rcu);
Marek Lindnercef63412015-08-04 21:09:55 +0800231}
232
233/**
234 * batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
Sven Eckelmannf6389692016-01-05 12:06:23 +0100235 * and possibly release it
Marek Lindnercef63412015-08-04 21:09:55 +0800236 * @hardif_neigh: hardif neigh neighbor to free
237 */
238void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
239{
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100240 kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release);
Marek Lindnercef63412015-08-04 21:09:55 +0800241}
242
243/**
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100244 * batadv_neigh_node_release - release neigh_node from lists and queue for
245 * free after rcu grace period
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100246 * @ref: kref pointer of the neigh_node
Simon Wunderlich89652332013-11-13 19:14:46 +0100247 */
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100248static void batadv_neigh_node_release(struct kref *ref)
Simon Wunderlich89652332013-11-13 19:14:46 +0100249{
250 struct hlist_node *node_tmp;
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100251 struct batadv_neigh_node *neigh_node;
Marek Lindnercef63412015-08-04 21:09:55 +0800252 struct batadv_hardif_neigh_node *hardif_neigh;
Simon Wunderlich89652332013-11-13 19:14:46 +0100253 struct batadv_neigh_ifinfo *neigh_ifinfo;
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800254 struct batadv_algo_ops *bao;
Simon Wunderlich89652332013-11-13 19:14:46 +0100255
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100256 neigh_node = container_of(ref, struct batadv_neigh_node, refcount);
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800257 bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
Simon Wunderlich89652332013-11-13 19:14:46 +0100258
259 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
260 &neigh_node->ifinfo_list, list) {
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100261 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
Simon Wunderlich89652332013-11-13 19:14:46 +0100262 }
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800263
Marek Lindnercef63412015-08-04 21:09:55 +0800264 hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
265 neigh_node->addr);
266 if (hardif_neigh) {
267 /* batadv_hardif_neigh_get() increases refcount too */
Sven Eckelmannf6389692016-01-05 12:06:23 +0100268 batadv_hardif_neigh_free_ref(hardif_neigh);
269 batadv_hardif_neigh_free_ref(hardif_neigh);
Marek Lindnercef63412015-08-04 21:09:55 +0800270 }
271
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800272 if (bao->bat_neigh_free)
273 bao->bat_neigh_free(neigh_node);
274
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100275 batadv_hardif_free_ref(neigh_node->if_incoming);
Simon Wunderlich89652332013-11-13 19:14:46 +0100276
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100277 kfree_rcu(neigh_node, rcu);
Simon Wunderlich89652332013-11-13 19:14:46 +0100278}
279
280/**
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100281 * batadv_neigh_node_free_ref - decrement the neighbors refcounter and possibly
282 * release it
Simon Wunderlich89652332013-11-13 19:14:46 +0100283 * @neigh_node: neigh neighbor to free
284 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200285void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000286{
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100287 kref_put(&neigh_node->refcount, batadv_neigh_node_release);
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000288}
289
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100290/**
291 * batadv_orig_node_get_router - router to the originator depending on iface
292 * @orig_node: the orig node for the router
293 * @if_outgoing: the interface where the payload packet has been received or
294 * the OGM should be sent to
295 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200296 * Return: the neighbor which should be router for this orig_node/iface.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100297 *
298 * The object is returned with refcounter increased by 1.
299 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200300struct batadv_neigh_node *
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100301batadv_orig_router_get(struct batadv_orig_node *orig_node,
302 const struct batadv_hard_iface *if_outgoing)
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000303{
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100304 struct batadv_orig_ifinfo *orig_ifinfo;
305 struct batadv_neigh_node *router = NULL;
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000306
307 rcu_read_lock();
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100308 hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
309 if (orig_ifinfo->if_outgoing != if_outgoing)
310 continue;
311
312 router = rcu_dereference(orig_ifinfo->router);
313 break;
314 }
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000315
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100316 if (router && !kref_get_unless_zero(&router->refcount))
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000317 router = NULL;
318
319 rcu_read_unlock();
320 return router;
321}
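/* A minimal sketch of a hypothetical caller selecting the route towards
 * orig_node for a given outgoing interface (BATADV_IF_DEFAULT selects the
 * default entry). The router is returned with an extra reference which has
 * to be dropped again via batadv_neigh_node_free_ref():
 *
 *	struct batadv_neigh_node *router;
 *
 *	router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
 *	if (!router)
 *		return false;
 *
 *	(forward via router->if_incoming and router->addr)
 *
 *	batadv_neigh_node_free_ref(router);
 */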
322
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200323/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100324 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
325 * @orig_node: the orig node to be queried
326 * @if_outgoing: the interface for which the ifinfo should be acquired
327 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200328 * Return: the requested orig_ifinfo or NULL if not found.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100329 *
330 * The object is returned with refcounter increased by 1.
331 */
332struct batadv_orig_ifinfo *
333batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
334 struct batadv_hard_iface *if_outgoing)
335{
336 struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
337
338 rcu_read_lock();
339 hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
340 list) {
341 if (tmp->if_outgoing != if_outgoing)
342 continue;
343
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100344 if (!kref_get_unless_zero(&tmp->refcount))
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100345 continue;
346
347 orig_ifinfo = tmp;
348 break;
349 }
350 rcu_read_unlock();
351
352 return orig_ifinfo;
353}
354
355/**
356 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
357 * @orig_node: the orig node to be queried
358 * @if_outgoing: the interface for which the ifinfo should be acquired
359 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200360 * Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100361 * interface otherwise. The object is created and added to the list
362 * if it does not exist.
363 *
364 * The object is returned with refcounter increased by 1.
365 */
366struct batadv_orig_ifinfo *
367batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
368 struct batadv_hard_iface *if_outgoing)
369{
370 struct batadv_orig_ifinfo *orig_ifinfo = NULL;
371 unsigned long reset_time;
372
373 spin_lock_bh(&orig_node->neigh_list_lock);
374
375 orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
376 if (orig_ifinfo)
377 goto out;
378
379 orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
380 if (!orig_ifinfo)
381 goto out;
382
383 if (if_outgoing != BATADV_IF_DEFAULT &&
Sven Eckelmann7a659d52016-01-16 10:29:54 +0100384 !kref_get_unless_zero(&if_outgoing->refcount)) {
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100385 kfree(orig_ifinfo);
386 orig_ifinfo = NULL;
387 goto out;
388 }
389
390 reset_time = jiffies - 1;
391 reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
392 orig_ifinfo->batman_seqno_reset = reset_time;
393 orig_ifinfo->if_outgoing = if_outgoing;
394 INIT_HLIST_NODE(&orig_ifinfo->list);
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100395 kref_init(&orig_ifinfo->refcount);
396 kref_get(&orig_ifinfo->refcount);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100397 hlist_add_head_rcu(&orig_ifinfo->list,
398 &orig_node->ifinfo_list);
399out:
400 spin_unlock_bh(&orig_node->neigh_list_lock);
401 return orig_ifinfo;
402}
403
404/**
Simon Wunderlich89652332013-11-13 19:14:46 +0100405 * batadv_neigh_ifinfo_get - find the ifinfo from an neigh_node
Sven Eckelmanne51f0392015-09-06 21:38:51 +0200406 * @neigh: the neigh node to be queried
Simon Wunderlich89652332013-11-13 19:14:46 +0100407 * @if_outgoing: the interface for which the ifinfo should be acquired
408 *
409 * The object is returned with refcounter increased by 1.
410 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200411 * Return: the requested neigh_ifinfo or NULL if not found
Simon Wunderlich89652332013-11-13 19:14:46 +0100412 */
413struct batadv_neigh_ifinfo *
414batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
415 struct batadv_hard_iface *if_outgoing)
416{
417 struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
418 *tmp_neigh_ifinfo;
419
420 rcu_read_lock();
421 hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
422 list) {
423 if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
424 continue;
425
Sven Eckelmann962c6832016-01-16 10:29:51 +0100426 if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount))
Simon Wunderlich89652332013-11-13 19:14:46 +0100427 continue;
428
429 neigh_ifinfo = tmp_neigh_ifinfo;
430 break;
431 }
432 rcu_read_unlock();
433
434 return neigh_ifinfo;
435}
436
437/**
438 * batadv_neigh_ifinfo_new - search and possibly create an neigh_ifinfo object
Sven Eckelmanne51f0392015-09-06 21:38:51 +0200439 * @neigh: the neigh node to be queried
Simon Wunderlich89652332013-11-13 19:14:46 +0100440 * @if_outgoing: the interface for which the ifinfo should be acquired
441 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200442 * Return: NULL in case of failure or the neigh_ifinfo object for the
Simon Wunderlich89652332013-11-13 19:14:46 +0100443 * if_outgoing interface otherwise. The object is created and added to the list
444 * if it does not exist.
445 *
446 * The object is returned with refcounter increased by 1.
447 */
448struct batadv_neigh_ifinfo *
449batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
450 struct batadv_hard_iface *if_outgoing)
451{
452 struct batadv_neigh_ifinfo *neigh_ifinfo;
453
454 spin_lock_bh(&neigh->ifinfo_lock);
455
456 neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
457 if (neigh_ifinfo)
458 goto out;
459
460 neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
461 if (!neigh_ifinfo)
462 goto out;
463
Sven Eckelmann7a659d52016-01-16 10:29:54 +0100464 if (if_outgoing && !kref_get_unless_zero(&if_outgoing->refcount)) {
Simon Wunderlich89652332013-11-13 19:14:46 +0100465 kfree(neigh_ifinfo);
466 neigh_ifinfo = NULL;
467 goto out;
468 }
469
470 INIT_HLIST_NODE(&neigh_ifinfo->list);
Sven Eckelmann962c6832016-01-16 10:29:51 +0100471 kref_init(&neigh_ifinfo->refcount);
472 kref_get(&neigh_ifinfo->refcount);
Simon Wunderlich89652332013-11-13 19:14:46 +0100473 neigh_ifinfo->if_outgoing = if_outgoing;
474
475 hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
476
477out:
478 spin_unlock_bh(&neigh->ifinfo_lock);
479
480 return neigh_ifinfo;
481}
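/* Hypothetical use by a routing algorithm: per-outgoing-interface data for
 * a neighbour lives in a neigh_ifinfo object, which is looked up or created
 * here and, like the other getters in this file, returned with its
 * refcount increased by 1:
 *
 *	struct batadv_neigh_ifinfo *neigh_ifinfo;
 *
 *	neigh_ifinfo = batadv_neigh_ifinfo_new(neigh, if_outgoing);
 *	if (!neigh_ifinfo)
 *		goto out;
 *
 *	(update the per-interface routing metrics here)
 *
 *	batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
 */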
482
483/**
Marek Lindnered292662015-08-04 23:31:44 +0800484 * batadv_neigh_node_get - retrieve a neighbour from the list
485 * @orig_node: originator which the neighbour belongs to
486 * @hard_iface: the interface where this neighbour is connected to
487 * @addr: the address of the neighbour
488 *
489 * Looks for and possibly returns a neighbour belonging to this originator list
490 * which is connected through the provided hard interface.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200491 *
 492 * Return: neighbor when found. Otherwise NULL
Marek Lindnered292662015-08-04 23:31:44 +0800493 */
494static struct batadv_neigh_node *
495batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
496 const struct batadv_hard_iface *hard_iface,
497 const u8 *addr)
498{
499 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
500
501 rcu_read_lock();
502 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
503 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
504 continue;
505
506 if (tmp_neigh_node->if_incoming != hard_iface)
507 continue;
508
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100509 if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
Marek Lindnered292662015-08-04 23:31:44 +0800510 continue;
511
512 res = tmp_neigh_node;
513 break;
514 }
515 rcu_read_unlock();
516
517 return res;
518}
519
520/**
Marek Lindnercef63412015-08-04 21:09:55 +0800521 * batadv_hardif_neigh_create - create a hardif neighbour node
522 * @hard_iface: the interface this neighbour is connected to
523 * @neigh_addr: the interface address of the neighbour to retrieve
524 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200525 * Return: the hardif neighbour node if found or created or NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800526 */
527static struct batadv_hardif_neigh_node *
528batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
529 const u8 *neigh_addr)
530{
Marek Lindner8248a4c2015-08-04 21:09:56 +0800531 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Marek Lindnercef63412015-08-04 21:09:55 +0800532 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
533
534 spin_lock_bh(&hard_iface->neigh_list_lock);
535
536 /* check if neighbor hasn't been added in the meantime */
537 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
538 if (hardif_neigh)
539 goto out;
540
Sven Eckelmann7a659d52016-01-16 10:29:54 +0100541 if (!kref_get_unless_zero(&hard_iface->refcount))
Marek Lindnercef63412015-08-04 21:09:55 +0800542 goto out;
543
544 hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
545 if (!hardif_neigh) {
546 batadv_hardif_free_ref(hard_iface);
547 goto out;
548 }
549
550 INIT_HLIST_NODE(&hardif_neigh->list);
551 ether_addr_copy(hardif_neigh->addr, neigh_addr);
552 hardif_neigh->if_incoming = hard_iface;
553 hardif_neigh->last_seen = jiffies;
554
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100555 kref_init(&hardif_neigh->refcount);
Marek Lindnercef63412015-08-04 21:09:55 +0800556
Marek Lindner8248a4c2015-08-04 21:09:56 +0800557 if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
558 bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);
559
Marek Lindnercef63412015-08-04 21:09:55 +0800560 hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
561
562out:
563 spin_unlock_bh(&hard_iface->neigh_list_lock);
564 return hardif_neigh;
565}
566
567/**
568 * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
569 * node
570 * @hard_iface: the interface this neighbour is connected to
571 * @neigh_addr: the interface address of the neighbour to retrieve
572 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200573 * Return: the hardif neighbour node if found or created or NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800574 */
575static struct batadv_hardif_neigh_node *
576batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
577 const u8 *neigh_addr)
578{
579 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
580
581 /* first check without locking to avoid the overhead */
582 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
583 if (hardif_neigh)
584 return hardif_neigh;
585
586 return batadv_hardif_neigh_create(hard_iface, neigh_addr);
587}
588
589/**
590 * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
591 * @hard_iface: the interface where this neighbour is connected to
592 * @neigh_addr: the address of the neighbour
593 *
594 * Looks for and possibly returns a neighbour belonging to this hard interface.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200595 *
 596 * Return: neighbor when found. Otherwise NULL
Marek Lindnercef63412015-08-04 21:09:55 +0800597 */
598struct batadv_hardif_neigh_node *
599batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
600 const u8 *neigh_addr)
601{
602 struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;
603
604 rcu_read_lock();
605 hlist_for_each_entry_rcu(tmp_hardif_neigh,
606 &hard_iface->neigh_list, list) {
607 if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
608 continue;
609
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100610 if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount))
Marek Lindnercef63412015-08-04 21:09:55 +0800611 continue;
612
613 hardif_neigh = tmp_hardif_neigh;
614 break;
615 }
616 rcu_read_unlock();
617
618 return hardif_neigh;
619}
620
621/**
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200622 * batadv_neigh_node_new - create and init a new neigh_node object
Marek Lindner3f32f8a2015-07-26 04:59:15 +0800623 * @orig_node: originator object representing the neighbour
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200624 * @hard_iface: the interface where the neighbour is connected to
625 * @neigh_addr: the mac address of the neighbour interface
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200626 *
627 * Allocates a new neigh_node object and initialises all the generic fields.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200628 *
 629 * Return: the neighbour node if found or created or NULL otherwise
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200630 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200631struct batadv_neigh_node *
Marek Lindner3f32f8a2015-07-26 04:59:15 +0800632batadv_neigh_node_new(struct batadv_orig_node *orig_node,
633 struct batadv_hard_iface *hard_iface,
634 const u8 *neigh_addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000635{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200636 struct batadv_neigh_node *neigh_node;
Marek Lindnercef63412015-08-04 21:09:55 +0800637 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000638
Marek Lindner741aa062015-07-26 04:57:43 +0800639 neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
640 if (neigh_node)
641 goto out;
642
Marek Lindnercef63412015-08-04 21:09:55 +0800643 hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
644 neigh_addr);
645 if (!hardif_neigh)
646 goto out;
647
Sven Eckelmann704509b2011-05-14 23:14:54 +0200648 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000649 if (!neigh_node)
Marek Lindner7ae8b282012-03-01 15:35:21 +0800650 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000651
Sven Eckelmann7a659d52016-01-16 10:29:54 +0100652 if (!kref_get_unless_zero(&hard_iface->refcount)) {
Marek Lindnerf729dc702015-07-26 04:37:15 +0800653 kfree(neigh_node);
654 neigh_node = NULL;
655 goto out;
656 }
657
Marek Lindner9591a792010-12-12 21:57:11 +0000658 INIT_HLIST_NODE(&neigh_node->list);
Simon Wunderlich89652332013-11-13 19:14:46 +0100659 INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
660 spin_lock_init(&neigh_node->ifinfo_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000661
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100662 ether_addr_copy(neigh_node->addr, neigh_addr);
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200663 neigh_node->if_incoming = hard_iface;
664 neigh_node->orig_node = orig_node;
665
Marek Lindner1605d0d2011-02-18 12:28:11 +0000666 /* extra reference for return */
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100667 kref_init(&neigh_node->refcount);
668 kref_get(&neigh_node->refcount);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000669
Marek Lindner741aa062015-07-26 04:57:43 +0800670 spin_lock_bh(&orig_node->neigh_list_lock);
671 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
672 spin_unlock_bh(&orig_node->neigh_list_lock);
673
Marek Lindnercef63412015-08-04 21:09:55 +0800674 /* increment unique neighbor refcount */
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100675 kref_get(&hardif_neigh->refcount);
Marek Lindnercef63412015-08-04 21:09:55 +0800676
Marek Lindner741aa062015-07-26 04:57:43 +0800677 batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
678 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
679 neigh_addr, orig_node->orig, hard_iface->net_dev->name);
680
Marek Lindner7ae8b282012-03-01 15:35:21 +0800681out:
Marek Lindnercef63412015-08-04 21:09:55 +0800682 if (hardif_neigh)
683 batadv_hardif_neigh_free_ref(hardif_neigh);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000684 return neigh_node;
685}
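/* Sketch of a hypothetical caller: protocol code that just received an OGM
 * from neigh_addr on hard_iface resolves (or creates) the matching
 * neighbour object. The returned neigh_node carries an extra reference for
 * the caller and must be released with batadv_neigh_node_free_ref():
 *
 *	struct batadv_neigh_node *neigh_node;
 *
 *	neigh_node = batadv_neigh_node_new(orig_node, hard_iface, neigh_addr);
 *	if (!neigh_node)
 *		goto out;
 *
 *	(update metrics, e.g. via batadv_neigh_ifinfo_new())
 *
 *	batadv_neigh_node_free_ref(neigh_node);
 */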
686
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100687/**
Marek Lindner75874052015-08-04 21:09:57 +0800688 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
689 * @seq: neighbour table seq_file struct
690 * @offset: not used
691 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200692 * Return: always 0
Marek Lindner75874052015-08-04 21:09:57 +0800693 */
694int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
695{
696 struct net_device *net_dev = (struct net_device *)seq->private;
697 struct batadv_priv *bat_priv = netdev_priv(net_dev);
698 struct batadv_hard_iface *primary_if;
699
700 primary_if = batadv_seq_print_text_primary_if_get(seq);
701 if (!primary_if)
702 return 0;
703
704 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
705 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
706 primary_if->net_dev->dev_addr, net_dev->name,
707 bat_priv->bat_algo_ops->name);
708
709 batadv_hardif_free_ref(primary_if);
710
711 if (!bat_priv->bat_algo_ops->bat_neigh_print) {
712 seq_puts(seq,
713 "No printing function for this routing protocol\n");
714 return 0;
715 }
716
717 bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
718 return 0;
719}
720
721/**
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100722 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
723 * free after rcu grace period
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100724 * @ref: kref pointer of the orig_ifinfo
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100725 */
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100726static void batadv_orig_ifinfo_release(struct kref *ref)
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100727{
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100728 struct batadv_orig_ifinfo *orig_ifinfo;
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100729 struct batadv_neigh_node *router;
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100730
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100731 orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);
732
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100733 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100734 batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100735
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100736 /* this is the last reference to this object */
737 router = rcu_dereference_protected(orig_ifinfo->router, true);
738 if (router)
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100739 batadv_neigh_node_free_ref(router);
740
741 kfree_rcu(orig_ifinfo, rcu);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100742}
743
744/**
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100745 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100746 * the orig_ifinfo
747 * @orig_ifinfo: the orig_ifinfo object to release
748 */
749void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
750{
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100751 kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100752}
753
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100754/**
755 * batadv_orig_node_free_rcu - free the orig_node
756 * @rcu: rcu pointer of the orig_node
757 */
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200758static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000759{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200760 struct batadv_orig_node *orig_node;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000761
Sven Eckelmann56303d32012-06-05 22:31:31 +0200762 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000763
Linus Lüssing60432d72014-02-15 17:47:51 +0100764 batadv_mcast_purge_orig(orig_node);
765
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +0200766 batadv_frag_purge_orig(orig_node, NULL);
767
Antonio Quartullid0015fd2013-09-03 11:10:23 +0200768 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
769 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
770
Antonio Quartullia73105b2011-04-27 14:27:44 +0200771 kfree(orig_node->tt_buff);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000772 kfree(orig_node);
773}
774
Linus Lüssing72822222013-04-15 21:43:29 +0800775/**
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100776 * batadv_orig_node_release - release orig_node from lists and queue for
777 * free after rcu grace period
778 * @orig_node: the orig node to free
779 */
780static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
781{
782 struct hlist_node *node_tmp;
783 struct batadv_neigh_node *neigh_node;
784 struct batadv_orig_ifinfo *orig_ifinfo;
785
786 spin_lock_bh(&orig_node->neigh_list_lock);
787
788 /* for all neighbors towards this originator ... */
789 hlist_for_each_entry_safe(neigh_node, node_tmp,
790 &orig_node->neigh_list, list) {
791 hlist_del_rcu(&neigh_node->list);
792 batadv_neigh_node_free_ref(neigh_node);
793 }
794
795 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
796 &orig_node->ifinfo_list, list) {
797 hlist_del_rcu(&orig_ifinfo->list);
798 batadv_orig_ifinfo_free_ref(orig_ifinfo);
799 }
800 spin_unlock_bh(&orig_node->neigh_list_lock);
801
802 /* Free nc_nodes */
803 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
804
805 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
806}
807
808/**
Linus Lüssing72822222013-04-15 21:43:29 +0800809 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100810 * release it
Linus Lüssing72822222013-04-15 21:43:29 +0800811 * @orig_node: the orig node to free
812 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200813void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000814{
815 if (atomic_dec_and_test(&orig_node->refcount))
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100816 batadv_orig_node_release(orig_node);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000817}
818
Sven Eckelmann56303d32012-06-05 22:31:31 +0200819void batadv_originator_free(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000820{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +0200821 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -0800822 struct hlist_node *node_tmp;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000823 struct hlist_head *head;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000824 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200825 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200826 u32 i;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000827
828 if (!hash)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000829 return;
830
831 cancel_delayed_work_sync(&bat_priv->orig_work);
832
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000833 bat_priv->orig_hash = NULL;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000834
835 for (i = 0; i < hash->size; i++) {
836 head = &hash->table[i];
837 list_lock = &hash->list_locks[i];
838
839 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -0800840 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +0000841 head, hash_entry) {
Sasha Levinb67bfe02013-02-27 17:06:00 -0800842 hlist_del_rcu(&orig_node->hash_entry);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +0200843 batadv_orig_node_free_ref(orig_node);
Marek Lindner16b1aba2011-01-19 20:01:42 +0000844 }
845 spin_unlock_bh(list_lock);
846 }
847
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200848 batadv_hash_destroy(hash);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000849}
850
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200851/**
852 * batadv_orig_node_new - creates a new orig_node
853 * @bat_priv: the bat priv with all the soft interface information
854 * @addr: the mac address of the originator
855 *
856 * Creates a new originator object and initialise all the generic fields.
857 * The new object is not added to the originator list.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200858 *
859 * Return: the newly created object or NULL on failure.
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +0200860 */
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200861struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200862 const u8 *addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000863{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200864 struct batadv_orig_node *orig_node;
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200865 struct batadv_orig_node_vlan *vlan;
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200866 unsigned long reset_time;
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200867 int i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000868
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200869 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
870 "Creating new originator: %pM\n", addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000871
Sven Eckelmann704509b2011-05-14 23:14:54 +0200872 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000873 if (!orig_node)
874 return NULL;
875
Marek Lindner9591a792010-12-12 21:57:11 +0000876 INIT_HLIST_HEAD(&orig_node->neigh_list);
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800877 INIT_HLIST_HEAD(&orig_node->vlan_list);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100878 INIT_HLIST_HEAD(&orig_node->ifinfo_list);
Marek Lindnerf3e00082011-01-25 21:52:11 +0000879 spin_lock_init(&orig_node->bcast_seqno_lock);
Marek Lindnerf987ed62010-12-12 21:57:12 +0000880 spin_lock_init(&orig_node->neigh_list_lock);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200881 spin_lock_init(&orig_node->tt_buff_lock);
Antonio Quartullia70a9aa2013-07-30 22:16:24 +0200882 spin_lock_init(&orig_node->tt_lock);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200883 spin_lock_init(&orig_node->vlan_list_lock);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000884
Martin Hundebølld56b1702013-01-25 11:12:39 +0100885 batadv_nc_init_orig(orig_node);
886
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000887 /* extra reference for return */
888 atomic_set(&orig_node->refcount, 2);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000889
Marek Lindner16b1aba2011-01-19 20:01:42 +0000890 orig_node->bat_priv = bat_priv;
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100891 ether_addr_copy(orig_node->orig, addr);
Antonio Quartulli785ea112011-11-23 11:35:44 +0100892 batadv_dat_init_orig_node_addr(orig_node);
Antonio Quartullic8c991b2011-07-07 01:40:57 +0200893 atomic_set(&orig_node->last_ttvn, 0);
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200894 orig_node->tt_buff = NULL;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200895 orig_node->tt_buff_len = 0;
Linus Lüssing2c667a32014-10-30 06:23:40 +0100896 orig_node->last_seen = jiffies;
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200897 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
898 orig_node->bcast_seqno_reset = reset_time;
Linus Lüssing8a4023c2015-06-16 17:10:26 +0200899
Linus Lüssing60432d72014-02-15 17:47:51 +0100900#ifdef CONFIG_BATMAN_ADV_MCAST
901 orig_node->mcast_flags = BATADV_NO_FLAGS;
Linus Lüssing8a4023c2015-06-16 17:10:26 +0200902 INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
903 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
904 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
905 spin_lock_init(&orig_node->mcast_handler_lock);
Linus Lüssing60432d72014-02-15 17:47:51 +0100906#endif
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000907
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200908 /* create a vlan object for the "untagged" LAN */
909 vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
910 if (!vlan)
911 goto free_orig_node;
912 /* batadv_orig_node_vlan_new() increases the refcounter.
913 * Immediately release vlan since it is not needed anymore in this
914 * context
915 */
916 batadv_orig_node_vlan_free_ref(vlan);
917
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +0200918 for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
919 INIT_HLIST_HEAD(&orig_node->fragments[i].head);
920 spin_lock_init(&orig_node->fragments[i].lock);
921 orig_node->fragments[i].size = 0;
922 }
923
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000924 return orig_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000925free_orig_node:
926 kfree(orig_node);
927 return NULL;
928}
929
Simon Wunderlich89652332013-11-13 19:14:46 +0100930/**
Simon Wunderlich709de132014-03-26 15:46:24 +0100931 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
932 * @bat_priv: the bat priv with all the soft interface information
 933 * @neigh: the neighbour node which is to be checked
934 */
935static void
936batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
937 struct batadv_neigh_node *neigh)
938{
939 struct batadv_neigh_ifinfo *neigh_ifinfo;
940 struct batadv_hard_iface *if_outgoing;
941 struct hlist_node *node_tmp;
942
943 spin_lock_bh(&neigh->ifinfo_lock);
944
 945 /* for all ifinfo objects for this neighbor */
946 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
947 &neigh->ifinfo_list, list) {
948 if_outgoing = neigh_ifinfo->if_outgoing;
949
950 /* always keep the default interface */
951 if (if_outgoing == BATADV_IF_DEFAULT)
952 continue;
953
954 /* don't purge if the interface is not (going) down */
955 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
956 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
957 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
958 continue;
959
960 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
961 "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
962 neigh->addr, if_outgoing->net_dev->name);
963
964 hlist_del_rcu(&neigh_ifinfo->list);
965 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
966 }
967
968 spin_unlock_bh(&neigh->ifinfo_lock);
969}
970
971/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100972 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
973 * @bat_priv: the bat priv with all the soft interface information
974 * @orig_node: orig node which is to be checked
975 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200976 * Return: true if any ifinfo entry was purged, false otherwise.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100977 */
978static bool
979batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
980 struct batadv_orig_node *orig_node)
981{
982 struct batadv_orig_ifinfo *orig_ifinfo;
983 struct batadv_hard_iface *if_outgoing;
984 struct hlist_node *node_tmp;
985 bool ifinfo_purged = false;
986
987 spin_lock_bh(&orig_node->neigh_list_lock);
988
989 /* for all ifinfo objects for this originator */
990 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
991 &orig_node->ifinfo_list, list) {
992 if_outgoing = orig_ifinfo->if_outgoing;
993
994 /* always keep the default interface */
995 if (if_outgoing == BATADV_IF_DEFAULT)
996 continue;
997
998 /* don't purge if the interface is not (going) down */
999 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
1000 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
1001 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
1002 continue;
1003
1004 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1005 "router/ifinfo purge: originator %pM, iface: %s\n",
1006 orig_node->orig, if_outgoing->net_dev->name);
1007
1008 ifinfo_purged = true;
1009
1010 hlist_del_rcu(&orig_ifinfo->list);
1011 batadv_orig_ifinfo_free_ref(orig_ifinfo);
Simon Wunderlichf3b3d902013-11-13 19:14:50 +01001012 if (orig_node->last_bonding_candidate == orig_ifinfo) {
1013 orig_node->last_bonding_candidate = NULL;
1014 batadv_orig_ifinfo_free_ref(orig_ifinfo);
1015 }
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001016 }
1017
1018 spin_unlock_bh(&orig_node->neigh_list_lock);
1019
1020 return ifinfo_purged;
1021}
1022
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001023/**
Simon Wunderlich89652332013-11-13 19:14:46 +01001024 * batadv_purge_orig_neighbors - purges neighbors from originator
1025 * @bat_priv: the bat priv with all the soft interface information
1026 * @orig_node: orig node which is to be checked
1027 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001028 * Return: true if any neighbor was purged, false otherwise
Simon Wunderlich89652332013-11-13 19:14:46 +01001029 */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001030static bool
1031batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
Simon Wunderlich89652332013-11-13 19:14:46 +01001032 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001033{
Sasha Levinb67bfe02013-02-27 17:06:00 -08001034 struct hlist_node *node_tmp;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001035 struct batadv_neigh_node *neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001036 bool neigh_purged = false;
Marek Lindner0b0094e2012-03-01 15:35:20 +08001037 unsigned long last_seen;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001038 struct batadv_hard_iface *if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001039
Marek Lindnerf987ed62010-12-12 21:57:12 +00001040 spin_lock_bh(&orig_node->neigh_list_lock);
1041
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001042 /* for all neighbors towards this originator ... */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001043 hlist_for_each_entry_safe(neigh_node, node_tmp,
Marek Lindner9591a792010-12-12 21:57:11 +00001044 &orig_node->neigh_list, list) {
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001045 last_seen = neigh_node->last_seen;
1046 if_incoming = neigh_node->if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001047
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001048 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001049 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
1050 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1051 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001052 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
1053 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1054 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001055 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001056 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
1057 orig_node->orig, neigh_node->addr,
1058 if_incoming->net_dev->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001059 else
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001060 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001061 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
1062 orig_node->orig, neigh_node->addr,
1063 jiffies_to_msecs(last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001064
1065 neigh_purged = true;
Marek Lindner9591a792010-12-12 21:57:11 +00001066
Marek Lindnerf987ed62010-12-12 21:57:12 +00001067 hlist_del_rcu(&neigh_node->list);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001068 batadv_neigh_node_free_ref(neigh_node);
Simon Wunderlich709de132014-03-26 15:46:24 +01001069 } else {
1070 /* only necessary if not the whole neighbor is to be
1071 * deleted, but some interface has been removed.
1072 */
1073 batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001074 }
1075 }
Marek Lindnerf987ed62010-12-12 21:57:12 +00001076
1077 spin_unlock_bh(&orig_node->neigh_list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001078 return neigh_purged;
1079}
1080
Simon Wunderlich89652332013-11-13 19:14:46 +01001081/**
1082 * batadv_find_best_neighbor - finds the best neighbor after purging
1083 * @bat_priv: the bat priv with all the soft interface information
1084 * @orig_node: orig node which is to be checked
1085 * @if_outgoing: the interface for which the metric should be compared
1086 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001087 * Return: the current best neighbor, with refcount increased.
Simon Wunderlich89652332013-11-13 19:14:46 +01001088 */
1089static struct batadv_neigh_node *
1090batadv_find_best_neighbor(struct batadv_priv *bat_priv,
1091 struct batadv_orig_node *orig_node,
1092 struct batadv_hard_iface *if_outgoing)
1093{
1094 struct batadv_neigh_node *best = NULL, *neigh;
1095 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1096
1097 rcu_read_lock();
1098 hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
1099 if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
1100 best, if_outgoing) <= 0))
1101 continue;
1102
Sven Eckelmann77ae32e2016-01-16 10:29:53 +01001103 if (!kref_get_unless_zero(&neigh->refcount))
Simon Wunderlich89652332013-11-13 19:14:46 +01001104 continue;
1105
1106 if (best)
1107 batadv_neigh_node_free_ref(best);
1108
1109 best = neigh;
1110 }
1111 rcu_read_unlock();
1112
1113 return best;
1114}
1115
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001116/**
1117 * batadv_purge_orig_node - purges obsolete information from an orig_node
1118 * @bat_priv: the bat priv with all the soft interface information
1119 * @orig_node: orig node which is to be checked
1120 *
1121 * This function checks if the orig_node or substructures of it have become
1122 * obsolete, and purges this information if that's the case.
1123 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001124 * Return: true if the orig_node is to be removed, false otherwise.
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001125 */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001126static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
1127 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001128{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001129 struct batadv_neigh_node *best_neigh_node;
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001130 struct batadv_hard_iface *hard_iface;
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001131 bool changed_ifinfo, changed_neigh;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001132
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001133 if (batadv_has_timed_out(orig_node->last_seen,
1134 2 * BATADV_PURGE_TIMEOUT)) {
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001135 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001136 "Originator timeout: originator %pM, last_seen %u\n",
1137 orig_node->orig,
1138 jiffies_to_msecs(orig_node->last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001139 return true;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001140 }
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001141 changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
1142 changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001143
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001144 if (!changed_ifinfo && !changed_neigh)
Simon Wunderlich89652332013-11-13 19:14:46 +01001145 return false;
1146
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001147 /* first for NULL ... */
Simon Wunderlich89652332013-11-13 19:14:46 +01001148 best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
1149 BATADV_IF_DEFAULT);
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001150 batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
1151 best_neigh_node);
Simon Wunderlich89652332013-11-13 19:14:46 +01001152 if (best_neigh_node)
1153 batadv_neigh_node_free_ref(best_neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001154
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001155 /* ... then for all other interfaces. */
1156 rcu_read_lock();
1157 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
1158 if (hard_iface->if_status != BATADV_IF_ACTIVE)
1159 continue;
1160
1161 if (hard_iface->soft_iface != bat_priv->soft_iface)
1162 continue;
1163
1164 best_neigh_node = batadv_find_best_neighbor(bat_priv,
1165 orig_node,
1166 hard_iface);
1167 batadv_update_route(bat_priv, orig_node, hard_iface,
1168 best_neigh_node);
1169 if (best_neigh_node)
1170 batadv_neigh_node_free_ref(best_neigh_node);
1171 }
1172 rcu_read_unlock();
1173
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001174 return false;
1175}
1176
Sven Eckelmann56303d32012-06-05 22:31:31 +02001177static void _batadv_purge_orig(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001178{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001179 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001180 struct hlist_node *node_tmp;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001181 struct hlist_head *head;
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001182 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001183 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001184 u32 i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001185
1186 if (!hash)
1187 return;
1188
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001189 /* for all origins... */
1190 for (i = 0; i < hash->size; i++) {
1191 head = &hash->table[i];
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001192 list_lock = &hash->list_locks[i];
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001193
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001194 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001195 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +00001196 head, hash_entry) {
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001197 if (batadv_purge_orig_node(bat_priv, orig_node)) {
Marek Lindner414254e2013-04-23 21:39:58 +08001198 batadv_gw_node_delete(bat_priv, orig_node);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001199 hlist_del_rcu(&orig_node->hash_entry);
Linus Lüssing9d31b3c2014-12-13 23:32:15 +01001200 batadv_tt_global_del_orig(orig_node->bat_priv,
1201 orig_node, -1,
1202 "originator timed out");
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001203 batadv_orig_node_free_ref(orig_node);
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001204 continue;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001205 }
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +02001206
1207 batadv_frag_purge_orig(orig_node,
1208 batadv_frag_check_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001209 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001210 spin_unlock_bh(list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001211 }
1212
Sven Eckelmann7cf06bc2012-05-12 02:09:29 +02001213 batadv_gw_election(bat_priv);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001214}
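
/* _batadv_purge_orig() unlinks entries while walking each bucket, so it needs
 * the _safe iterator (which caches the next pointer) and the per-bucket
 * spinlock to serialize writers, while readers stay lockless behind RCU.
 * Below is a stripped-down sketch of that pattern; the entry type, the helper
 * name and the kfree_rcu()-based free are hypothetical simplifications (the
 * real code frees originators via their reference counter).
 */
struct batadv_example_entry {
        struct hlist_node hash_entry;
        struct rcu_head rcu;
        unsigned long last_seen;
};

static void __maybe_unused
batadv_example_purge_bucket(struct hlist_head *head, spinlock_t *list_lock,
                            unsigned long timeout)
{
        struct batadv_example_entry *entry;
        struct hlist_node *node_tmp;

        spin_lock_bh(list_lock);
        hlist_for_each_entry_safe(entry, node_tmp, head, hash_entry) {
                if (!time_after(jiffies, entry->last_seen + timeout))
                        continue;

                /* unlink so new lookups no longer see the entry ... */
                hlist_del_rcu(&entry->hash_entry);
                /* ... and free it once all RCU readers are done */
                kfree_rcu(entry, rcu);
        }
        spin_unlock_bh(list_lock);
}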
1215
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001216static void batadv_purge_orig(struct work_struct *work)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001217{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001218 struct delayed_work *delayed_work;
1219 struct batadv_priv *bat_priv;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001220
Sven Eckelmann56303d32012-06-05 22:31:31 +02001221 delayed_work = container_of(work, struct delayed_work, work);
1222 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001223 _batadv_purge_orig(bat_priv);
Antonio Quartulli72414442012-12-25 13:14:37 +01001224 queue_delayed_work(batadv_event_workqueue,
1225 &bat_priv->orig_work,
1226 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001227}
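
/* batadv_purge_orig() re-arms itself at the end of every run, so the periodic
 * purge only needs to be kicked off once and cancelled at teardown. Roughly
 * (the originator init/free helpers live earlier in this file and are not
 * shown here; treat this as a sketch, not a verbatim quote):
 *
 *      INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
 *      queue_delayed_work(batadv_event_workqueue, &bat_priv->orig_work,
 *                         msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
 *      ...
 *      cancel_delayed_work_sync(&bat_priv->orig_work);
 */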
1228
Sven Eckelmann56303d32012-06-05 22:31:31 +02001229void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001230{
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001231 _batadv_purge_orig(bat_priv);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001232}
1233
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001234int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001235{
1236 struct net_device *net_dev = (struct net_device *)seq->private;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001237 struct batadv_priv *bat_priv = netdev_priv(net_dev);
Sven Eckelmann56303d32012-06-05 22:31:31 +02001238 struct batadv_hard_iface *primary_if;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001239
Marek Lindner30da63a2012-08-03 17:15:46 +02001240 primary_if = batadv_seq_print_text_primary_if_get(seq);
1241 if (!primary_if)
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001242 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001243
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001244 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001245 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001246 primary_if->net_dev->dev_addr, net_dev->name,
1247 bat_priv->bat_algo_ops->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001248
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001249 batadv_hardif_free_ref(primary_if);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001250
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001251 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1252 seq_puts(seq,
1253 "No printing function for this routing protocol\n");
1254 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001255 }
1256
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001257 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
1258 BATADV_IF_DEFAULT);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001259
Marek Lindner30da63a2012-08-03 17:15:46 +02001260 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001261}
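
/* batadv_orig_seq_print_text() expects the soft interface's net_device in
 * seq->private. The debugfs glue (in batman-adv's debugfs code, not shown
 * here) provides it via single_open(); a minimal open callback would look
 * roughly like the sketch below - the wrapper name is hypothetical.
 */
static int __maybe_unused
batadv_example_originators_open(struct inode *inode, struct file *file)
{
        struct net_device *net_dev = (struct net_device *)inode->i_private;

        return single_open(file, batadv_orig_seq_print_text, net_dev);
}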

/**
 * batadv_orig_hardif_seq_print_text - writes originator info for a specific
 *  outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Return: 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
        struct net_device *net_dev = (struct net_device *)seq->private;
        struct batadv_hard_iface *hard_iface;
        struct batadv_priv *bat_priv;

        hard_iface = batadv_hardif_get_by_netdev(net_dev);

        if (!hard_iface || !hard_iface->soft_iface) {
                seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
                goto out;
        }

        bat_priv = netdev_priv(hard_iface->soft_iface);
        if (!bat_priv->bat_algo_ops->bat_orig_print) {
                seq_puts(seq,
                         "No printing function for this routing protocol\n");
                goto out;
        }

        if (hard_iface->if_status != BATADV_IF_ACTIVE) {
                seq_puts(seq, "Interface not active\n");
                goto out;
        }

        seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
                   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
                   hard_iface->net_dev->dev_addr,
                   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

        bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
        if (hard_iface)
                batadv_hardif_free_ref(hard_iface);
        return 0;
}
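
/* Both show functions above dispatch to the routing algorithm's
 * bat_orig_print() hook, passing either BATADV_IF_DEFAULT or one specific
 * outgoing interface. A minimal hook, hypothetical and far simpler than the
 * real B.A.T.M.A.N. IV printer, could look like this sketch; it would be
 * wired up through the algorithm's struct batadv_algo_ops.
 */
static void __maybe_unused
batadv_example_orig_print(struct batadv_priv *bat_priv, struct seq_file *seq,
                          struct batadv_hard_iface *if_outgoing)
{
        /* a real implementation walks bat_priv->orig_hash and prints one
         * line per originator/neighbor pair for the given outgoing interface
         */
        seq_puts(seq, "originator table printing not implemented\n");
}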

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_orig_node *orig_node;
        u32 i;
        int ret;

        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num
         */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        ret = 0;
                        if (bao->bat_orig_add_if)
                                ret = bao->bat_orig_add_if(orig_node,
                                                           max_if_num);
                        if (ret == -ENOMEM)
                                goto err;
                }
                rcu_read_unlock();
        }

        return 0;

err:
        rcu_read_unlock();
        return -ENOMEM;
}
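
/* batadv_orig_hash_add_if() only checks its algorithm hook for -ENOMEM: the
 * hook is expected to grow whatever per-interface, per-originator state it
 * keeps (B.A.T.M.A.N. IV resizes its broadcast bookkeeping) and report
 * allocation failure. The helper below is a hypothetical illustration of
 * that contract on a plain u8 array, not the real hook implementation.
 */
static int __maybe_unused
batadv_example_resize_if_array(u8 **array, int old_if_num, int max_if_num)
{
        u8 *new_array;
        int i;

        /* GFP_ATOMIC because the hooks above are invoked under
         * rcu_read_lock()
         */
        new_array = kcalloc(max_if_num, sizeof(*new_array), GFP_ATOMIC);
        if (!new_array)
                return -ENOMEM;

        /* keep the counters of the interfaces that are still present */
        for (i = 0; i < old_if_num; i++)
                new_array[i] = (*array)[i];

        kfree(*array);
        *array = new_array;

        return 0;
}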

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
                            int max_if_num)
{
        struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
        struct batadv_hashtable *hash = bat_priv->orig_hash;
        struct hlist_head *head;
        struct batadv_hard_iface *hard_iface_tmp;
        struct batadv_orig_node *orig_node;
        struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
        u32 i;
        int ret;

        /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
         * if_num
         */
        for (i = 0; i < hash->size; i++) {
                head = &hash->table[i];

                rcu_read_lock();
                hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
                        ret = 0;
                        if (bao->bat_orig_del_if)
                                ret = bao->bat_orig_del_if(orig_node,
                                                           max_if_num,
                                                           hard_iface->if_num);
                        if (ret == -ENOMEM)
                                goto err;
                }
                rcu_read_unlock();
        }

        /* renumber the remaining batman interfaces; done under the RCU read
         * lock that protects batadv_hardif_list
         */
        rcu_read_lock();
        list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
                if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
                        continue;

                if (hard_iface == hard_iface_tmp)
                        continue;

                if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
                        continue;

                if (hard_iface_tmp->if_num > hard_iface->if_num)
                        hard_iface_tmp->if_num--;
        }
        rcu_read_unlock();

        hard_iface->if_num = -1;
        return 0;

err:
        rcu_read_unlock();
        return -ENOMEM;
}
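
/* These two resize handlers are driven from batman-adv's hard-interface code
 * (not shown here) whenever an interface is added to or removed from a mesh:
 * the caller passes the updated number of interfaces, and the removal path
 * relies on the renumbering above to keep if_num values dense. A rough,
 * hypothetical call sequence:
 *
 *      ret = batadv_orig_hash_add_if(hard_iface, new_if_count);
 *      ...
 *      ret = batadv_orig_hash_del_if(hard_iface, new_if_count);
 */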