Sven Eckelmann0046b042016-01-01 00:01:03 +01001/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00002 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
Antonio Quartulliebf38fb2013-11-03 20:40:48 +010015 * along with this program; if not, see <http://www.gnu.org/licenses/>.
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000016 */
17
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000018#include "originator.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020019#include "main.h"
20
21#include <linux/errno.h>
22#include <linux/etherdevice.h>
23#include <linux/fs.h>
24#include <linux/jiffies.h>
25#include <linux/kernel.h>
Sven Eckelmann90f564d2016-01-16 10:29:40 +010026#include <linux/kref.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020027#include <linux/list.h>
28#include <linux/lockdep.h>
29#include <linux/netdevice.h>
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080030#include <linux/rculist.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020031#include <linux/seq_file.h>
32#include <linux/slab.h>
33#include <linux/spinlock.h>
34#include <linux/workqueue.h>
35
36#include "distributed-arp-table.h"
37#include "fragmentation.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000038#include "gateway_client.h"
39#include "hard-interface.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020040#include "hash.h"
Linus Lüssing60432d72014-02-15 17:47:51 +010041#include "multicast.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020042#include "network-coding.h"
43#include "routing.h"
44#include "translation-table.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000045
Antonio Quartullidec05072012-11-10 11:00:32 +010046/* hash class keys */
47static struct lock_class_key batadv_orig_hash_lock_class_key;
48
Sven Eckelmann03fc7f82012-05-12 18:34:00 +020049static void batadv_purge_orig(struct work_struct *work);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000050
Sven Eckelmann62fe7102015-09-15 19:00:48 +020051/**
Sven Eckelmann7afcbbe2015-10-31 12:29:29 +010052 * batadv_compare_orig - comparison function used in the originator hash table
53 * @node: node in the local table
54 * @data2: second object to compare the node to
Sven Eckelmann62fe7102015-09-15 19:00:48 +020055 *
56 * Return: 1 if they are the same originator
57 */
Antonio Quartullibbad0a52013-09-02 12:15:02 +020058int batadv_compare_orig(const struct hlist_node *node, const void *data2)
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020059{
Sven Eckelmann56303d32012-06-05 22:31:31 +020060 const void *data1 = container_of(node, struct batadv_orig_node,
61 hash_entry);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020062
dingtianhong323813e2013-12-26 19:40:39 +080063 return batadv_compare_eth(data1, data2);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020064}
65
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020066/**
67 * batadv_orig_node_vlan_get - get an orig_node_vlan object
68 * @orig_node: the originator serving the VLAN
69 * @vid: the VLAN identifier
70 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +020071 * Return: the vlan object identified by vid and belonging to orig_node or NULL
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020072 * if it does not exist.
73 */
74struct batadv_orig_node_vlan *
75batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
76 unsigned short vid)
77{
78 struct batadv_orig_node_vlan *vlan = NULL, *tmp;
79
80 rcu_read_lock();
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080081 hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020082 if (tmp->vid != vid)
83 continue;
84
85 if (!atomic_inc_not_zero(&tmp->refcount))
86 continue;
87
88 vlan = tmp;
89
90 break;
91 }
92 rcu_read_unlock();
93
94 return vlan;
95}
96
97/**
98 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
99 * object
100 * @orig_node: the originator serving the VLAN
101 * @vid: the VLAN identifier
102 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200103 * Return: NULL in case of failure or the vlan object identified by vid and
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200104 * belonging to orig_node otherwise. The object is created and added to the list
105 * if it does not exist.
106 *
107 * The object is returned with refcounter increased by 1.
108 */
109struct batadv_orig_node_vlan *
110batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
111 unsigned short vid)
112{
113 struct batadv_orig_node_vlan *vlan;
114
115 spin_lock_bh(&orig_node->vlan_list_lock);
116
117 /* first look if an object for this vid already exists */
118 vlan = batadv_orig_node_vlan_get(orig_node, vid);
119 if (vlan)
120 goto out;
121
122 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
123 if (!vlan)
124 goto out;
125
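	/* refcount starts at 2: one reference is kept by orig_node->vlan_list,
	 * the other one is handed back to the caller
	 */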
126 atomic_set(&vlan->refcount, 2);
127 vlan->vid = vid;
128
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800129 hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200130
131out:
132 spin_unlock_bh(&orig_node->vlan_list_lock);
133
134 return vlan;
135}
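/* Illustrative usage sketch (not part of this file; callers such as the
 * translation table code follow this pattern): every successful
 * batadv_orig_node_vlan_new()/_get() call must be balanced by a
 * batadv_orig_node_vlan_free_ref() once the object is no longer needed:
 *
 *	vlan = batadv_orig_node_vlan_new(orig_node, vid);
 *	if (!vlan)
 *		return;
 *	...
 *	batadv_orig_node_vlan_free_ref(vlan);
 */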
136
137/**
138 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
139 * the originator-vlan object
140 * @orig_vlan: the originator-vlan object to release
141 */
142void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
143{
144 if (atomic_dec_and_test(&orig_vlan->refcount))
145 kfree_rcu(orig_vlan, rcu);
146}
147
Sven Eckelmann56303d32012-06-05 22:31:31 +0200148int batadv_originator_init(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000149{
150 if (bat_priv->orig_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200151 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000152
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200153 bat_priv->orig_hash = batadv_hash_new(1024);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000154
155 if (!bat_priv->orig_hash)
156 goto err;
157
Antonio Quartullidec05072012-11-10 11:00:32 +0100158 batadv_hash_set_lock_class(bat_priv->orig_hash,
159 &batadv_orig_hash_lock_class_key);
160
Antonio Quartulli72414442012-12-25 13:14:37 +0100161 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
162 queue_delayed_work(batadv_event_workqueue,
163 &bat_priv->orig_work,
164 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
165
Sven Eckelmann5346c352012-05-05 13:27:28 +0200166 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000167
168err:
Sven Eckelmann5346c352012-05-05 13:27:28 +0200169 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000170}
171
Simon Wunderlich89652332013-11-13 19:14:46 +0100172/**
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100173 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
174 * free after rcu grace period
Sven Eckelmann962c6832016-01-16 10:29:51 +0100175 * @ref: kref pointer of the neigh_ifinfo
Simon Wunderlich89652332013-11-13 19:14:46 +0100176 */
Sven Eckelmann962c6832016-01-16 10:29:51 +0100177static void batadv_neigh_ifinfo_release(struct kref *ref)
Simon Wunderlich89652332013-11-13 19:14:46 +0100178{
Sven Eckelmann962c6832016-01-16 10:29:51 +0100179 struct batadv_neigh_ifinfo *neigh_ifinfo;
180
181 neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount);
182
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100183 if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
184 batadv_hardif_free_ref(neigh_ifinfo->if_outgoing);
185
186 kfree_rcu(neigh_ifinfo, rcu);
Simon Wunderlich89652332013-11-13 19:14:46 +0100187}
188
189/**
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100190 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly release
Simon Wunderlich89652332013-11-13 19:14:46 +0100191 * the neigh_ifinfo
192 * @neigh_ifinfo: the neigh_ifinfo object to release
193 */
194void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
195{
Sven Eckelmann962c6832016-01-16 10:29:51 +0100196 kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release);
Simon Wunderlich89652332013-11-13 19:14:46 +0100197}
198
199/**
Sven Eckelmannf6389692016-01-05 12:06:23 +0100200 * batadv_hardif_neigh_release - release hardif neigh node from lists and
201 * queue for free after rcu grace period
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100202 * @ref: kref pointer of the neigh_node
Marek Lindnercef63412015-08-04 21:09:55 +0800203 */
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100204static void batadv_hardif_neigh_release(struct kref *ref)
Marek Lindnercef63412015-08-04 21:09:55 +0800205{
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100206 struct batadv_hardif_neigh_node *hardif_neigh;
207
208 hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node,
209 refcount);
210
Sven Eckelmannf6389692016-01-05 12:06:23 +0100211 spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
212 hlist_del_init_rcu(&hardif_neigh->list);
213 spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
Sven Eckelmannbab7c6c2016-01-05 12:06:17 +0100214
Sven Eckelmannf6389692016-01-05 12:06:23 +0100215 batadv_hardif_free_ref(hardif_neigh->if_incoming);
216 kfree_rcu(hardif_neigh, rcu);
Marek Lindnercef63412015-08-04 21:09:55 +0800217}
218
219/**
220 * batadv_hardif_neigh_free_ref - decrement the hardif neighbors refcounter
Sven Eckelmannf6389692016-01-05 12:06:23 +0100221 * and possibly release it
Marek Lindnercef63412015-08-04 21:09:55 +0800222 * @hardif_neigh: hardif neighbor node to free
223 */
224void batadv_hardif_neigh_free_ref(struct batadv_hardif_neigh_node *hardif_neigh)
225{
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100226 kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release);
Marek Lindnercef63412015-08-04 21:09:55 +0800227}
228
229/**
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100230 * batadv_neigh_node_release - release neigh_node from lists and queue for
231 * free after rcu grace period
232 * @neigh_node: neigh neighbor to free
Simon Wunderlich89652332013-11-13 19:14:46 +0100233 */
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100234static void batadv_neigh_node_release(struct batadv_neigh_node *neigh_node)
Simon Wunderlich89652332013-11-13 19:14:46 +0100235{
236 struct hlist_node *node_tmp;
Marek Lindnercef63412015-08-04 21:09:55 +0800237 struct batadv_hardif_neigh_node *hardif_neigh;
Simon Wunderlich89652332013-11-13 19:14:46 +0100238 struct batadv_neigh_ifinfo *neigh_ifinfo;
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800239 struct batadv_algo_ops *bao;
Simon Wunderlich89652332013-11-13 19:14:46 +0100240
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800241 bao = neigh_node->orig_node->bat_priv->bat_algo_ops;
Simon Wunderlich89652332013-11-13 19:14:46 +0100242
243 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
244 &neigh_node->ifinfo_list, list) {
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100245 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
Simon Wunderlich89652332013-11-13 19:14:46 +0100246 }
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800247
Marek Lindnercef63412015-08-04 21:09:55 +0800248 hardif_neigh = batadv_hardif_neigh_get(neigh_node->if_incoming,
249 neigh_node->addr);
250 if (hardif_neigh) {
251 /* batadv_hardif_neigh_get() increases refcount too */
Sven Eckelmannf6389692016-01-05 12:06:23 +0100252 batadv_hardif_neigh_free_ref(hardif_neigh);
253 batadv_hardif_neigh_free_ref(hardif_neigh);
Marek Lindnercef63412015-08-04 21:09:55 +0800254 }
255
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800256 if (bao->bat_neigh_free)
257 bao->bat_neigh_free(neigh_node);
258
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100259 batadv_hardif_free_ref(neigh_node->if_incoming);
Simon Wunderlich89652332013-11-13 19:14:46 +0100260
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100261 kfree_rcu(neigh_node, rcu);
Simon Wunderlich89652332013-11-13 19:14:46 +0100262}
263
264/**
Simon Wunderlich89652332013-11-13 19:14:46 +0100265 * batadv_neigh_node_free_ref - decrement the neighbors refcounter
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100266 * and possibly release it
Simon Wunderlich89652332013-11-13 19:14:46 +0100267 * @neigh_node: neighbor node to free
268 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200269void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000270{
Marek Lindner44524fc2011-02-10 14:33:53 +0000271 if (atomic_dec_and_test(&neigh_node->refcount))
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100272 batadv_neigh_node_release(neigh_node);
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000273}
274
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100275/**
276 * batadv_orig_node_get_router - router to the originator depending on iface
277 * @orig_node: the orig node for the router
278 * @if_outgoing: the interface where the payload packet has been received or
279 * the OGM should be sent to
280 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200281 * Return: the neighbor which should be router for this orig_node/iface.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100282 *
283 * The object is returned with refcounter increased by 1.
284 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200285struct batadv_neigh_node *
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100286batadv_orig_router_get(struct batadv_orig_node *orig_node,
287 const struct batadv_hard_iface *if_outgoing)
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000288{
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100289 struct batadv_orig_ifinfo *orig_ifinfo;
290 struct batadv_neigh_node *router = NULL;
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000291
292 rcu_read_lock();
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100293 hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
294 if (orig_ifinfo->if_outgoing != if_outgoing)
295 continue;
296
297 router = rcu_dereference(orig_ifinfo->router);
298 break;
299 }
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000300
301 if (router && !atomic_inc_not_zero(&router->refcount))
302 router = NULL;
303
304 rcu_read_unlock();
305 return router;
306}
307
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200308/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100309 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
310 * @orig_node: the orig node to be queried
311 * @if_outgoing: the interface for which the ifinfo should be acquired
312 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200313 * Return: the requested orig_ifinfo or NULL if not found.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100314 *
315 * The object is returned with refcounter increased by 1.
316 */
317struct batadv_orig_ifinfo *
318batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
319 struct batadv_hard_iface *if_outgoing)
320{
321 struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
322
323 rcu_read_lock();
324 hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
325 list) {
326 if (tmp->if_outgoing != if_outgoing)
327 continue;
328
329 if (!atomic_inc_not_zero(&tmp->refcount))
330 continue;
331
332 orig_ifinfo = tmp;
333 break;
334 }
335 rcu_read_unlock();
336
337 return orig_ifinfo;
338}
339
340/**
341 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
342 * @orig_node: the orig node to be queried
343 * @if_outgoing: the interface for which the ifinfo should be acquired
344 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200345 * Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100346 * interface otherwise. The object is created and added to the list
347 * if it does not exist.
348 *
349 * The object is returned with refcounter increased by 1.
350 */
351struct batadv_orig_ifinfo *
352batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
353 struct batadv_hard_iface *if_outgoing)
354{
355 struct batadv_orig_ifinfo *orig_ifinfo = NULL;
356 unsigned long reset_time;
357
358 spin_lock_bh(&orig_node->neigh_list_lock);
359
360 orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
361 if (orig_ifinfo)
362 goto out;
363
364 orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
365 if (!orig_ifinfo)
366 goto out;
367
368 if (if_outgoing != BATADV_IF_DEFAULT &&
369 !atomic_inc_not_zero(&if_outgoing->refcount)) {
370 kfree(orig_ifinfo);
371 orig_ifinfo = NULL;
372 goto out;
373 }
374
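	/* initialise the seqno reset timestamp far enough in the past that the
	 * reset protection window (BATADV_RESET_PROTECTION_MS) has already
	 * expired for this freshly created ifinfo
	 */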
375 reset_time = jiffies - 1;
376 reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
377 orig_ifinfo->batman_seqno_reset = reset_time;
378 orig_ifinfo->if_outgoing = if_outgoing;
379 INIT_HLIST_NODE(&orig_ifinfo->list);
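	/* one reference for orig_node->ifinfo_list, one extra for the caller */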
380 atomic_set(&orig_ifinfo->refcount, 2);
381 hlist_add_head_rcu(&orig_ifinfo->list,
382 &orig_node->ifinfo_list);
383out:
384 spin_unlock_bh(&orig_node->neigh_list_lock);
385 return orig_ifinfo;
386}
387
388/**
Simon Wunderlich89652332013-11-13 19:14:46 +0100389 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
Sven Eckelmanne51f0392015-09-06 21:38:51 +0200390 * @neigh: the neigh node to be queried
Simon Wunderlich89652332013-11-13 19:14:46 +0100391 * @if_outgoing: the interface for which the ifinfo should be acquired
392 *
393 * The object is returned with refcounter increased by 1.
394 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200395 * Return: the requested neigh_ifinfo or NULL if not found
Simon Wunderlich89652332013-11-13 19:14:46 +0100396 */
397struct batadv_neigh_ifinfo *
398batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
399 struct batadv_hard_iface *if_outgoing)
400{
401 struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
402 *tmp_neigh_ifinfo;
403
404 rcu_read_lock();
405 hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
406 list) {
407 if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
408 continue;
409
Sven Eckelmann962c6832016-01-16 10:29:51 +0100410 if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount))
Simon Wunderlich89652332013-11-13 19:14:46 +0100411 continue;
412
413 neigh_ifinfo = tmp_neigh_ifinfo;
414 break;
415 }
416 rcu_read_unlock();
417
418 return neigh_ifinfo;
419}
420
421/**
 422 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
Sven Eckelmanne51f0392015-09-06 21:38:51 +0200423 * @neigh: the neigh node to be queried
Simon Wunderlich89652332013-11-13 19:14:46 +0100424 * @if_outgoing: the interface for which the ifinfo should be acquired
425 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200426 * Return: NULL in case of failure or the neigh_ifinfo object for the
Simon Wunderlich89652332013-11-13 19:14:46 +0100427 * if_outgoing interface otherwise. The object is created and added to the list
428 * if it does not exist.
429 *
430 * The object is returned with refcounter increased by 1.
431 */
432struct batadv_neigh_ifinfo *
433batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
434 struct batadv_hard_iface *if_outgoing)
435{
436 struct batadv_neigh_ifinfo *neigh_ifinfo;
437
438 spin_lock_bh(&neigh->ifinfo_lock);
439
440 neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
441 if (neigh_ifinfo)
442 goto out;
443
444 neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
445 if (!neigh_ifinfo)
446 goto out;
447
448 if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
449 kfree(neigh_ifinfo);
450 neigh_ifinfo = NULL;
451 goto out;
452 }
453
454 INIT_HLIST_NODE(&neigh_ifinfo->list);
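	/* kref_init() accounts for the reference held by neigh->ifinfo_list;
	 * the additional kref_get() below is the reference returned to the caller
	 */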
Sven Eckelmann962c6832016-01-16 10:29:51 +0100455 kref_init(&neigh_ifinfo->refcount);
456 kref_get(&neigh_ifinfo->refcount);
Simon Wunderlich89652332013-11-13 19:14:46 +0100457 neigh_ifinfo->if_outgoing = if_outgoing;
458
459 hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
460
461out:
462 spin_unlock_bh(&neigh->ifinfo_lock);
463
464 return neigh_ifinfo;
465}
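/* Illustrative pairing (a sketch; the callers live in the protocol code, not
 * in this file): the reference taken by batadv_neigh_ifinfo_new()/_get() has
 * to be dropped with batadv_neigh_ifinfo_free_ref() when done:
 *
 *	neigh_ifinfo = batadv_neigh_ifinfo_new(neigh, if_outgoing);
 *	if (!neigh_ifinfo)
 *		goto out;
 *	...
 *	batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
 */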
466
467/**
Marek Lindnered292662015-08-04 23:31:44 +0800468 * batadv_neigh_node_get - retrieve a neighbour from the list
469 * @orig_node: originator which the neighbour belongs to
470 * @hard_iface: the interface where this neighbour is connected to
471 * @addr: the address of the neighbour
472 *
473 * Looks for and possibly returns a neighbour belonging to this originator list
474 * which is connected through the provided hard interface.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200475 *
 476 * Return: neighbor when found. Otherwise NULL
Marek Lindnered292662015-08-04 23:31:44 +0800477 */
478static struct batadv_neigh_node *
479batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
480 const struct batadv_hard_iface *hard_iface,
481 const u8 *addr)
482{
483 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
484
485 rcu_read_lock();
486 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
487 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
488 continue;
489
490 if (tmp_neigh_node->if_incoming != hard_iface)
491 continue;
492
493 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
494 continue;
495
496 res = tmp_neigh_node;
497 break;
498 }
499 rcu_read_unlock();
500
501 return res;
502}
503
504/**
Marek Lindnercef63412015-08-04 21:09:55 +0800505 * batadv_hardif_neigh_create - create a hardif neighbour node
506 * @hard_iface: the interface this neighbour is connected to
507 * @neigh_addr: the interface address of the neighbour to retrieve
508 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200509 * Return: the hardif neighbour node if found or created, NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800510 */
511static struct batadv_hardif_neigh_node *
512batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
513 const u8 *neigh_addr)
514{
Marek Lindner8248a4c2015-08-04 21:09:56 +0800515 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Marek Lindnercef63412015-08-04 21:09:55 +0800516 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
517
518 spin_lock_bh(&hard_iface->neigh_list_lock);
519
520 /* check if neighbor hasn't been added in the meantime */
521 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
522 if (hardif_neigh)
523 goto out;
524
525 if (!atomic_inc_not_zero(&hard_iface->refcount))
526 goto out;
527
528 hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
529 if (!hardif_neigh) {
530 batadv_hardif_free_ref(hard_iface);
531 goto out;
532 }
533
534 INIT_HLIST_NODE(&hardif_neigh->list);
535 ether_addr_copy(hardif_neigh->addr, neigh_addr);
536 hardif_neigh->if_incoming = hard_iface;
537 hardif_neigh->last_seen = jiffies;
538
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100539 kref_init(&hardif_neigh->refcount);
Marek Lindnercef63412015-08-04 21:09:55 +0800540
Marek Lindner8248a4c2015-08-04 21:09:56 +0800541 if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
542 bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);
543
Marek Lindnercef63412015-08-04 21:09:55 +0800544 hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
545
546out:
547 spin_unlock_bh(&hard_iface->neigh_list_lock);
548 return hardif_neigh;
549}
550
551/**
552 * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
553 * node
554 * @hard_iface: the interface this neighbour is connected to
555 * @neigh_addr: the interface address of the neighbour to retrieve
556 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200557 * Return: the hardif neighbour node if found or created, NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800558 */
559static struct batadv_hardif_neigh_node *
560batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
561 const u8 *neigh_addr)
562{
563 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
564
565 /* first check without locking to avoid the overhead */
566 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
567 if (hardif_neigh)
568 return hardif_neigh;
569
570 return batadv_hardif_neigh_create(hard_iface, neigh_addr);
571}
572
573/**
574 * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
575 * @hard_iface: the interface where this neighbour is connected to
576 * @neigh_addr: the address of the neighbour
577 *
578 * Looks for and possibly returns a neighbour belonging to this hard interface.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200579 *
 580 * Return: neighbor when found. Otherwise NULL
Marek Lindnercef63412015-08-04 21:09:55 +0800581 */
582struct batadv_hardif_neigh_node *
583batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
584 const u8 *neigh_addr)
585{
586 struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;
587
588 rcu_read_lock();
589 hlist_for_each_entry_rcu(tmp_hardif_neigh,
590 &hard_iface->neigh_list, list) {
591 if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
592 continue;
593
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100594 if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount))
Marek Lindnercef63412015-08-04 21:09:55 +0800595 continue;
596
597 hardif_neigh = tmp_hardif_neigh;
598 break;
599 }
600 rcu_read_unlock();
601
602 return hardif_neigh;
603}
604
605/**
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200606 * batadv_neigh_node_new - create and init a new neigh_node object
Marek Lindner3f32f8a2015-07-26 04:59:15 +0800607 * @orig_node: originator object representing the neighbour
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200608 * @hard_iface: the interface where the neighbour is connected to
609 * @neigh_addr: the mac address of the neighbour interface
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200610 *
611 * Allocates a new neigh_node object and initialises all the generic fields.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200612 *
 613 * Return: the neighbour node if found or created, NULL otherwise
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200614 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200615struct batadv_neigh_node *
Marek Lindner3f32f8a2015-07-26 04:59:15 +0800616batadv_neigh_node_new(struct batadv_orig_node *orig_node,
617 struct batadv_hard_iface *hard_iface,
618 const u8 *neigh_addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000619{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200620 struct batadv_neigh_node *neigh_node;
Marek Lindnercef63412015-08-04 21:09:55 +0800621 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000622
Marek Lindner741aa062015-07-26 04:57:43 +0800623 neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
624 if (neigh_node)
625 goto out;
626
Marek Lindnercef63412015-08-04 21:09:55 +0800627 hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
628 neigh_addr);
629 if (!hardif_neigh)
630 goto out;
631
Sven Eckelmann704509b2011-05-14 23:14:54 +0200632 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000633 if (!neigh_node)
Marek Lindner7ae8b282012-03-01 15:35:21 +0800634 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000635
Marek Lindnerf729dc702015-07-26 04:37:15 +0800636 if (!atomic_inc_not_zero(&hard_iface->refcount)) {
637 kfree(neigh_node);
638 neigh_node = NULL;
639 goto out;
640 }
641
Marek Lindner9591a792010-12-12 21:57:11 +0000642 INIT_HLIST_NODE(&neigh_node->list);
Simon Wunderlich89652332013-11-13 19:14:46 +0100643 INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
644 spin_lock_init(&neigh_node->ifinfo_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000645
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100646 ether_addr_copy(neigh_node->addr, neigh_addr);
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200647 neigh_node->if_incoming = hard_iface;
648 neigh_node->orig_node = orig_node;
649
Marek Lindner1605d0d2011-02-18 12:28:11 +0000650 /* extra reference for return */
651 atomic_set(&neigh_node->refcount, 2);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000652
Marek Lindner741aa062015-07-26 04:57:43 +0800653 spin_lock_bh(&orig_node->neigh_list_lock);
654 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
655 spin_unlock_bh(&orig_node->neigh_list_lock);
656
Marek Lindnercef63412015-08-04 21:09:55 +0800657 /* increment unique neighbor refcount */
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100658 kref_get(&hardif_neigh->refcount);
Marek Lindnercef63412015-08-04 21:09:55 +0800659
Marek Lindner741aa062015-07-26 04:57:43 +0800660 batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
661 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
662 neigh_addr, orig_node->orig, hard_iface->net_dev->name);
663
Marek Lindner7ae8b282012-03-01 15:35:21 +0800664out:
Marek Lindnercef63412015-08-04 21:09:55 +0800665 if (hardif_neigh)
666 batadv_hardif_neigh_free_ref(hardif_neigh);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000667 return neigh_node;
668}
669
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100670/**
Marek Lindner75874052015-08-04 21:09:57 +0800671 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
672 * @seq: neighbour table seq_file struct
673 * @offset: not used
674 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200675 * Return: always 0
Marek Lindner75874052015-08-04 21:09:57 +0800676 */
677int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
678{
679 struct net_device *net_dev = (struct net_device *)seq->private;
680 struct batadv_priv *bat_priv = netdev_priv(net_dev);
681 struct batadv_hard_iface *primary_if;
682
683 primary_if = batadv_seq_print_text_primary_if_get(seq);
684 if (!primary_if)
685 return 0;
686
687 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
688 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
689 primary_if->net_dev->dev_addr, net_dev->name,
690 bat_priv->bat_algo_ops->name);
691
692 batadv_hardif_free_ref(primary_if);
693
694 if (!bat_priv->bat_algo_ops->bat_neigh_print) {
695 seq_puts(seq,
696 "No printing function for this routing protocol\n");
697 return 0;
698 }
699
700 bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
701 return 0;
702}
703
704/**
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100705 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
706 * free after rcu grace period
707 * @orig_ifinfo: the orig_ifinfo object to release
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100708 */
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100709static void batadv_orig_ifinfo_release(struct batadv_orig_ifinfo *orig_ifinfo)
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100710{
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100711 struct batadv_neigh_node *router;
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100712
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100713 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100714 batadv_hardif_free_ref(orig_ifinfo->if_outgoing);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100715
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100716 /* this is the last reference to this object */
717 router = rcu_dereference_protected(orig_ifinfo->router, true);
718 if (router)
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100719 batadv_neigh_node_free_ref(router);
720
721 kfree_rcu(orig_ifinfo, rcu);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100722}
723
724/**
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100725 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly release
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100726 * the orig_ifinfo
727 * @orig_ifinfo: the orig_ifinfo object to release
728 */
729void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
730{
731 if (atomic_dec_and_test(&orig_ifinfo->refcount))
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100732 batadv_orig_ifinfo_release(orig_ifinfo);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100733}
734
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100735/**
736 * batadv_orig_node_free_rcu - free the orig_node
737 * @rcu: rcu pointer of the orig_node
738 */
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200739static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000740{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200741 struct batadv_orig_node *orig_node;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000742
Sven Eckelmann56303d32012-06-05 22:31:31 +0200743 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000744
Linus Lüssing60432d72014-02-15 17:47:51 +0100745 batadv_mcast_purge_orig(orig_node);
746
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +0200747 batadv_frag_purge_orig(orig_node, NULL);
748
Antonio Quartullid0015fd2013-09-03 11:10:23 +0200749 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
750 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
751
Antonio Quartullia73105b2011-04-27 14:27:44 +0200752 kfree(orig_node->tt_buff);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000753 kfree(orig_node);
754}
755
Linus Lüssing72822222013-04-15 21:43:29 +0800756/**
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100757 * batadv_orig_node_release - release orig_node from lists and queue for
758 * free after rcu grace period
759 * @orig_node: the orig node to free
760 */
761static void batadv_orig_node_release(struct batadv_orig_node *orig_node)
762{
763 struct hlist_node *node_tmp;
764 struct batadv_neigh_node *neigh_node;
765 struct batadv_orig_ifinfo *orig_ifinfo;
766
767 spin_lock_bh(&orig_node->neigh_list_lock);
768
769 /* for all neighbors towards this originator ... */
770 hlist_for_each_entry_safe(neigh_node, node_tmp,
771 &orig_node->neigh_list, list) {
772 hlist_del_rcu(&neigh_node->list);
773 batadv_neigh_node_free_ref(neigh_node);
774 }
775
776 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
777 &orig_node->ifinfo_list, list) {
778 hlist_del_rcu(&orig_ifinfo->list);
779 batadv_orig_ifinfo_free_ref(orig_ifinfo);
780 }
781 spin_unlock_bh(&orig_node->neigh_list_lock);
782
783 /* Free nc_nodes */
784 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
785
786 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
787}
788
789/**
Linus Lüssing72822222013-04-15 21:43:29 +0800790 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100791 * release it
Linus Lüssing72822222013-04-15 21:43:29 +0800792 * @orig_node: the orig node to free
793 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200794void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000795{
796 if (atomic_dec_and_test(&orig_node->refcount))
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100797 batadv_orig_node_release(orig_node);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000798}
799
Sven Eckelmann56303d32012-06-05 22:31:31 +0200800void batadv_originator_free(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000801{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +0200802 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -0800803 struct hlist_node *node_tmp;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000804 struct hlist_head *head;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000805 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200806 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200807 u32 i;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000808
809 if (!hash)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000810 return;
811
812 cancel_delayed_work_sync(&bat_priv->orig_work);
813
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000814 bat_priv->orig_hash = NULL;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000815
816 for (i = 0; i < hash->size; i++) {
817 head = &hash->table[i];
818 list_lock = &hash->list_locks[i];
819
820 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -0800821 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +0000822 head, hash_entry) {
Sasha Levinb67bfe02013-02-27 17:06:00 -0800823 hlist_del_rcu(&orig_node->hash_entry);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +0200824 batadv_orig_node_free_ref(orig_node);
Marek Lindner16b1aba2011-01-19 20:01:42 +0000825 }
826 spin_unlock_bh(list_lock);
827 }
828
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200829 batadv_hash_destroy(hash);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000830}
831
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200832/**
833 * batadv_orig_node_new - creates a new orig_node
834 * @bat_priv: the bat priv with all the soft interface information
835 * @addr: the mac address of the originator
836 *
 837 * Creates a new originator object and initialises all the generic fields.
838 * The new object is not added to the originator list.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200839 *
840 * Return: the newly created object or NULL on failure.
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +0200841 */
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200842struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200843 const u8 *addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000844{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200845 struct batadv_orig_node *orig_node;
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200846 struct batadv_orig_node_vlan *vlan;
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200847 unsigned long reset_time;
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200848 int i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000849
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200850 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
851 "Creating new originator: %pM\n", addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000852
Sven Eckelmann704509b2011-05-14 23:14:54 +0200853 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000854 if (!orig_node)
855 return NULL;
856
Marek Lindner9591a792010-12-12 21:57:11 +0000857 INIT_HLIST_HEAD(&orig_node->neigh_list);
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800858 INIT_HLIST_HEAD(&orig_node->vlan_list);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100859 INIT_HLIST_HEAD(&orig_node->ifinfo_list);
Marek Lindnerf3e00082011-01-25 21:52:11 +0000860 spin_lock_init(&orig_node->bcast_seqno_lock);
Marek Lindnerf987ed62010-12-12 21:57:12 +0000861 spin_lock_init(&orig_node->neigh_list_lock);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200862 spin_lock_init(&orig_node->tt_buff_lock);
Antonio Quartullia70a9aa2013-07-30 22:16:24 +0200863 spin_lock_init(&orig_node->tt_lock);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200864 spin_lock_init(&orig_node->vlan_list_lock);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000865
Martin Hundebølld56b1702013-01-25 11:12:39 +0100866 batadv_nc_init_orig(orig_node);
867
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000868 /* extra reference for return */
869 atomic_set(&orig_node->refcount, 2);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000870
Marek Lindner16b1aba2011-01-19 20:01:42 +0000871 orig_node->bat_priv = bat_priv;
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100872 ether_addr_copy(orig_node->orig, addr);
Antonio Quartulli785ea112011-11-23 11:35:44 +0100873 batadv_dat_init_orig_node_addr(orig_node);
Antonio Quartullic8c991b2011-07-07 01:40:57 +0200874 atomic_set(&orig_node->last_ttvn, 0);
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200875 orig_node->tt_buff = NULL;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200876 orig_node->tt_buff_len = 0;
Linus Lüssing2c667a32014-10-30 06:23:40 +0100877 orig_node->last_seen = jiffies;
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200878 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
879 orig_node->bcast_seqno_reset = reset_time;
Linus Lüssing8a4023c2015-06-16 17:10:26 +0200880
Linus Lüssing60432d72014-02-15 17:47:51 +0100881#ifdef CONFIG_BATMAN_ADV_MCAST
882 orig_node->mcast_flags = BATADV_NO_FLAGS;
Linus Lüssing8a4023c2015-06-16 17:10:26 +0200883 INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
884 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
885 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
886 spin_lock_init(&orig_node->mcast_handler_lock);
Linus Lüssing60432d72014-02-15 17:47:51 +0100887#endif
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000888
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200889 /* create a vlan object for the "untagged" LAN */
890 vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
891 if (!vlan)
892 goto free_orig_node;
893 /* batadv_orig_node_vlan_new() increases the refcounter.
894 * Immediately release vlan since it is not needed anymore in this
895 * context
896 */
897 batadv_orig_node_vlan_free_ref(vlan);
898
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +0200899 for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
900 INIT_HLIST_HEAD(&orig_node->fragments[i].head);
901 spin_lock_init(&orig_node->fragments[i].lock);
902 orig_node->fragments[i].size = 0;
903 }
904
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000905 return orig_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000906free_orig_node:
907 kfree(orig_node);
908 return NULL;
909}
910
Simon Wunderlich89652332013-11-13 19:14:46 +0100911/**
Simon Wunderlich709de132014-03-26 15:46:24 +0100912 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
913 * @bat_priv: the bat priv with all the soft interface information
914 * @neigh: orig node which is to be checked
915 */
916static void
917batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
918 struct batadv_neigh_node *neigh)
919{
920 struct batadv_neigh_ifinfo *neigh_ifinfo;
921 struct batadv_hard_iface *if_outgoing;
922 struct hlist_node *node_tmp;
923
924 spin_lock_bh(&neigh->ifinfo_lock);
925
 926 /* for all ifinfo objects of this neighbour */
927 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
928 &neigh->ifinfo_list, list) {
929 if_outgoing = neigh_ifinfo->if_outgoing;
930
931 /* always keep the default interface */
932 if (if_outgoing == BATADV_IF_DEFAULT)
933 continue;
934
935 /* don't purge if the interface is not (going) down */
936 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
937 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
938 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
939 continue;
940
941 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
942 "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
943 neigh->addr, if_outgoing->net_dev->name);
944
945 hlist_del_rcu(&neigh_ifinfo->list);
946 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
947 }
948
949 spin_unlock_bh(&neigh->ifinfo_lock);
950}
951
952/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100953 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
954 * @bat_priv: the bat priv with all the soft interface information
955 * @orig_node: orig node which is to be checked
956 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200957 * Return: true if any ifinfo entry was purged, false otherwise.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100958 */
959static bool
960batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
961 struct batadv_orig_node *orig_node)
962{
963 struct batadv_orig_ifinfo *orig_ifinfo;
964 struct batadv_hard_iface *if_outgoing;
965 struct hlist_node *node_tmp;
966 bool ifinfo_purged = false;
967
968 spin_lock_bh(&orig_node->neigh_list_lock);
969
970 /* for all ifinfo objects for this originator */
971 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
972 &orig_node->ifinfo_list, list) {
973 if_outgoing = orig_ifinfo->if_outgoing;
974
975 /* always keep the default interface */
976 if (if_outgoing == BATADV_IF_DEFAULT)
977 continue;
978
979 /* don't purge if the interface is not (going) down */
980 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
981 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
982 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
983 continue;
984
985 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
986 "router/ifinfo purge: originator %pM, iface: %s\n",
987 orig_node->orig, if_outgoing->net_dev->name);
988
989 ifinfo_purged = true;
990
991 hlist_del_rcu(&orig_ifinfo->list);
992 batadv_orig_ifinfo_free_ref(orig_ifinfo);
Simon Wunderlichf3b3d902013-11-13 19:14:50 +0100993 if (orig_node->last_bonding_candidate == orig_ifinfo) {
994 orig_node->last_bonding_candidate = NULL;
995 batadv_orig_ifinfo_free_ref(orig_ifinfo);
996 }
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100997 }
998
999 spin_unlock_bh(&orig_node->neigh_list_lock);
1000
1001 return ifinfo_purged;
1002}
1003
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001004/**
Simon Wunderlich89652332013-11-13 19:14:46 +01001005 * batadv_purge_orig_neighbors - purges neighbors from originator
1006 * @bat_priv: the bat priv with all the soft interface information
1007 * @orig_node: orig node which is to be checked
1008 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001009 * Return: true if any neighbor was purged, false otherwise
Simon Wunderlich89652332013-11-13 19:14:46 +01001010 */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001011static bool
1012batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
Simon Wunderlich89652332013-11-13 19:14:46 +01001013 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001014{
Sasha Levinb67bfe02013-02-27 17:06:00 -08001015 struct hlist_node *node_tmp;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001016 struct batadv_neigh_node *neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001017 bool neigh_purged = false;
Marek Lindner0b0094e2012-03-01 15:35:20 +08001018 unsigned long last_seen;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001019 struct batadv_hard_iface *if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001020
Marek Lindnerf987ed62010-12-12 21:57:12 +00001021 spin_lock_bh(&orig_node->neigh_list_lock);
1022
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001023 /* for all neighbors towards this originator ... */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001024 hlist_for_each_entry_safe(neigh_node, node_tmp,
Marek Lindner9591a792010-12-12 21:57:11 +00001025 &orig_node->neigh_list, list) {
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001026 last_seen = neigh_node->last_seen;
1027 if_incoming = neigh_node->if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001028
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001029 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001030 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
1031 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1032 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001033 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
1034 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1035 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001036 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001037 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
1038 orig_node->orig, neigh_node->addr,
1039 if_incoming->net_dev->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001040 else
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001041 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001042 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
1043 orig_node->orig, neigh_node->addr,
1044 jiffies_to_msecs(last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001045
1046 neigh_purged = true;
Marek Lindner9591a792010-12-12 21:57:11 +00001047
Marek Lindnerf987ed62010-12-12 21:57:12 +00001048 hlist_del_rcu(&neigh_node->list);
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001049 batadv_neigh_node_free_ref(neigh_node);
Simon Wunderlich709de132014-03-26 15:46:24 +01001050 } else {
1051 /* only necessary if not the whole neighbor is to be
1052 * deleted, but some interface has been removed.
1053 */
1054 batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001055 }
1056 }
Marek Lindnerf987ed62010-12-12 21:57:12 +00001057
1058 spin_unlock_bh(&orig_node->neigh_list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001059 return neigh_purged;
1060}
1061
Simon Wunderlich89652332013-11-13 19:14:46 +01001062/**
1063 * batadv_find_best_neighbor - finds the best neighbor after purging
1064 * @bat_priv: the bat priv with all the soft interface information
1065 * @orig_node: orig node which is to be checked
1066 * @if_outgoing: the interface for which the metric should be compared
1067 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001068 * Return: the current best neighbor, with refcount increased.
Simon Wunderlich89652332013-11-13 19:14:46 +01001069 */
1070static struct batadv_neigh_node *
1071batadv_find_best_neighbor(struct batadv_priv *bat_priv,
1072 struct batadv_orig_node *orig_node,
1073 struct batadv_hard_iface *if_outgoing)
1074{
1075 struct batadv_neigh_node *best = NULL, *neigh;
1076 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1077
1078 rcu_read_lock();
1079 hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
1080 if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
1081 best, if_outgoing) <= 0))
1082 continue;
1083
1084 if (!atomic_inc_not_zero(&neigh->refcount))
1085 continue;
1086
1087 if (best)
1088 batadv_neigh_node_free_ref(best);
1089
1090 best = neigh;
1091 }
1092 rcu_read_unlock();
1093
1094 return best;
1095}
1096
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001097/**
1098 * batadv_purge_orig_node - purges obsolete information from an orig_node
1099 * @bat_priv: the bat priv with all the soft interface information
1100 * @orig_node: orig node which is to be checked
1101 *
1102 * This function checks if the orig_node or substructures of it have become
1103 * obsolete, and purges this information if that's the case.
1104 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001105 * Return: true if the orig_node is to be removed, false otherwise.
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001106 */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001107static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
1108 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001109{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001110 struct batadv_neigh_node *best_neigh_node;
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001111 struct batadv_hard_iface *hard_iface;
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001112 bool changed_ifinfo, changed_neigh;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001113
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001114 if (batadv_has_timed_out(orig_node->last_seen,
1115 2 * BATADV_PURGE_TIMEOUT)) {
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001116 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001117 "Originator timeout: originator %pM, last_seen %u\n",
1118 orig_node->orig,
1119 jiffies_to_msecs(orig_node->last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001120 return true;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001121 }
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001122 changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
1123 changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001124
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001125 if (!changed_ifinfo && !changed_neigh)
Simon Wunderlich89652332013-11-13 19:14:46 +01001126 return false;
1127
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001128 /* first for NULL ... */
Simon Wunderlich89652332013-11-13 19:14:46 +01001129 best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
1130 BATADV_IF_DEFAULT);
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001131 batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
1132 best_neigh_node);
Simon Wunderlich89652332013-11-13 19:14:46 +01001133 if (best_neigh_node)
1134 batadv_neigh_node_free_ref(best_neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001135
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001136 /* ... then for all other interfaces. */
1137 rcu_read_lock();
1138 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
1139 if (hard_iface->if_status != BATADV_IF_ACTIVE)
1140 continue;
1141
1142 if (hard_iface->soft_iface != bat_priv->soft_iface)
1143 continue;
1144
1145 best_neigh_node = batadv_find_best_neighbor(bat_priv,
1146 orig_node,
1147 hard_iface);
1148 batadv_update_route(bat_priv, orig_node, hard_iface,
1149 best_neigh_node);
1150 if (best_neigh_node)
1151 batadv_neigh_node_free_ref(best_neigh_node);
1152 }
1153 rcu_read_unlock();
1154
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001155 return false;
1156}
1157
Sven Eckelmann56303d32012-06-05 22:31:31 +02001158static void _batadv_purge_orig(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001159{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001160 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001161 struct hlist_node *node_tmp;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001162 struct hlist_head *head;
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001163 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001164 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001165 u32 i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001166
1167 if (!hash)
1168 return;
1169
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001170 /* for all origins... */
1171 for (i = 0; i < hash->size; i++) {
1172 head = &hash->table[i];
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001173 list_lock = &hash->list_locks[i];
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001174
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001175 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001176 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +00001177 head, hash_entry) {
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001178 if (batadv_purge_orig_node(bat_priv, orig_node)) {
Marek Lindner414254e2013-04-23 21:39:58 +08001179 batadv_gw_node_delete(bat_priv, orig_node);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001180 hlist_del_rcu(&orig_node->hash_entry);
Linus Lüssing9d31b3c2014-12-13 23:32:15 +01001181 batadv_tt_global_del_orig(orig_node->bat_priv,
1182 orig_node, -1,
1183 "originator timed out");
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001184 batadv_orig_node_free_ref(orig_node);
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001185 continue;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001186 }
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +02001187
1188 batadv_frag_purge_orig(orig_node,
1189 batadv_frag_check_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001190 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001191 spin_unlock_bh(list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001192 }
1193
Sven Eckelmann7cf06bc2012-05-12 02:09:29 +02001194 batadv_gw_election(bat_priv);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001195}
1196
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001197static void batadv_purge_orig(struct work_struct *work)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001198{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001199 struct delayed_work *delayed_work;
1200 struct batadv_priv *bat_priv;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001201
Sven Eckelmann56303d32012-06-05 22:31:31 +02001202 delayed_work = container_of(work, struct delayed_work, work);
1203 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001204 _batadv_purge_orig(bat_priv);
Antonio Quartulli72414442012-12-25 13:14:37 +01001205 queue_delayed_work(batadv_event_workqueue,
1206 &bat_priv->orig_work,
1207 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001208}
1209
Sven Eckelmann56303d32012-06-05 22:31:31 +02001210void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001211{
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001212 _batadv_purge_orig(bat_priv);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001213}
1214
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001215int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001216{
1217 struct net_device *net_dev = (struct net_device *)seq->private;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001218 struct batadv_priv *bat_priv = netdev_priv(net_dev);
Sven Eckelmann56303d32012-06-05 22:31:31 +02001219 struct batadv_hard_iface *primary_if;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001220
Marek Lindner30da63a2012-08-03 17:15:46 +02001221 primary_if = batadv_seq_print_text_primary_if_get(seq);
1222 if (!primary_if)
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001223 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001224
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001225 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001226 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001227 primary_if->net_dev->dev_addr, net_dev->name,
1228 bat_priv->bat_algo_ops->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001229
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001230 batadv_hardif_free_ref(primary_if);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001231
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001232 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1233 seq_puts(seq,
1234 "No printing function for this routing protocol\n");
1235 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001236 }
1237
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001238 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
1239 BATADV_IF_DEFAULT);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001240
Marek Lindner30da63a2012-08-03 17:15:46 +02001241 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001242}
1243
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001244/**
 1245 * batadv_orig_hardif_seq_print_text - writes originator information for a specific
1246 * outgoing interface
1247 * @seq: debugfs table seq_file struct
1248 * @offset: not used
1249 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001250 * Return: 0
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001251 */
1252int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1253{
1254 struct net_device *net_dev = (struct net_device *)seq->private;
1255 struct batadv_hard_iface *hard_iface;
1256 struct batadv_priv *bat_priv;
1257
1258 hard_iface = batadv_hardif_get_by_netdev(net_dev);
1259
1260 if (!hard_iface || !hard_iface->soft_iface) {
1261 seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
1262 goto out;
1263 }
1264
1265 bat_priv = netdev_priv(hard_iface->soft_iface);
1266 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1267 seq_puts(seq,
1268 "No printing function for this routing protocol\n");
1269 goto out;
1270 }
1271
1272 if (hard_iface->if_status != BATADV_IF_ACTIVE) {
1273 seq_puts(seq, "Interface not active\n");
1274 goto out;
1275 }
1276
1277 seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
1278 BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
1279 hard_iface->net_dev->dev_addr,
1280 hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);
1281
1282 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
1283
1284out:
Marek Lindner16a41422014-04-24 03:44:25 +08001285 if (hard_iface)
1286 batadv_hardif_free_ref(hard_iface);
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001287 return 0;
1288}
1289
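/**
 * batadv_orig_hash_add_if - resize all originators when a hard interface is
 *  added
 * @hard_iface: the hard interface being added
 * @max_if_num: the new number of interfaces
 *
 * Return: 0 on success, -ENOMEM if resizing an originator fails
 */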
Sven Eckelmann56303d32012-06-05 22:31:31 +02001290int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
1291 int max_if_num)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001292{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001293 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001294 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001295 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001296 struct hlist_head *head;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001297 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001298 u32 i;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001299 int ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001300
1301 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001302 * if_num
1303 */
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001304 for (i = 0; i < hash->size; i++) {
1305 head = &hash->table[i];
1306
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001307 rcu_read_lock();
Sasha Levinb67bfe02013-02-27 17:06:00 -08001308 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001309 ret = 0;
1310 if (bao->bat_orig_add_if)
1311 ret = bao->bat_orig_add_if(orig_node,
1312 max_if_num);
Sven Eckelmann5346c352012-05-05 13:27:28 +02001313 if (ret == -ENOMEM)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001314 goto err;
1315 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001316 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001317 }
1318
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001319 return 0;
1320
1321err:
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001322 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001323 return -ENOMEM;
1324}
1325
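/**
 * batadv_orig_hash_del_if - resize all originators when a hard interface is
 *  removed
 * @hard_iface: the hard interface being removed
 * @max_if_num: the remaining number of interfaces
 *
 * Return: 0 on success, -ENOMEM if resizing an originator fails
 */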
Sven Eckelmann56303d32012-06-05 22:31:31 +02001326int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1327 int max_if_num)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001328{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001329 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001330 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001331 struct hlist_head *head;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001332 struct batadv_hard_iface *hard_iface_tmp;
1333 struct batadv_orig_node *orig_node;
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001334 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001335 u32 i;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001336 int ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001337
1338 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001339 * if_num
1340 */
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001341 for (i = 0; i < hash->size; i++) {
1342 head = &hash->table[i];
1343
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001344 rcu_read_lock();
Sasha Levinb67bfe02013-02-27 17:06:00 -08001345 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001346 ret = 0;
1347 if (bao->bat_orig_del_if)
1348 ret = bao->bat_orig_del_if(orig_node,
1349 max_if_num,
1350 hard_iface->if_num);
Sven Eckelmann5346c352012-05-05 13:27:28 +02001351 if (ret == -ENOMEM)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001352 goto err;
1353 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001354 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001355 }
1356
 1357	/* renumber remaining batman interfaces of this soft interface */
1358 rcu_read_lock();
Sven Eckelmann3193e8f2012-05-12 02:09:42 +02001359 list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001360 if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001361 continue;
1362
Marek Lindnere6c10f42011-02-18 12:33:20 +00001363 if (hard_iface == hard_iface_tmp)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001364 continue;
1365
Marek Lindnere6c10f42011-02-18 12:33:20 +00001366 if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001367 continue;
1368
Marek Lindnere6c10f42011-02-18 12:33:20 +00001369 if (hard_iface_tmp->if_num > hard_iface->if_num)
1370 hard_iface_tmp->if_num--;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001371 }
1372 rcu_read_unlock();
1373
Marek Lindnere6c10f42011-02-18 12:33:20 +00001374 hard_iface->if_num = -1;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001375 return 0;
1376
1377err:
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001378 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001379 return -ENOMEM;
1380}