/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "originator.h"
#include "main.h"

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/netdevice.h>
#include <linux/rculist.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "distributed-arp-table.h"
#include "fragmentation.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "hash.h"
#include "multicast.h"
#include "network-coding.h"
#include "routing.h"
#include "translation-table.h"

/* hash class keys */
static struct lock_class_key batadv_orig_hash_lock_class_key;

static void batadv_purge_orig(struct work_struct *work);

/**
 * batadv_compare_orig - comparing function used in the originator hash table
 * @node: node in the local table
 * @data2: second object to compare the node to
 *
 * Return: 1 if they are the same originator
 */
int batadv_compare_orig(const struct hlist_node *node, const void *data2)
{
	const void *data1 = container_of(node, struct batadv_orig_node,
					 hash_entry);

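	/* data1 points at the whole batadv_orig_node; comparing it directly
	 * against the raw MAC address in data2 relies on the assumption that
	 * the orig[ETH_ALEN] field is the first member of that structure
	 */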
	return batadv_compare_eth(data1, data2);
}

/**
 * batadv_orig_node_vlan_get - get an orig_node_vlan object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Return: the vlan object identified by vid and belonging to orig_node or NULL
 * if it does not exist.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan = NULL, *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
		if (tmp->vid != vid)
			continue;

		if (!kref_get_unless_zero(&tmp->refcount))
			continue;

		vlan = tmp;

		break;
	}
	rcu_read_unlock();

	return vlan;
}
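
/* Illustrative caller sketch (not taken from this file): every successful
 * batadv_orig_node_vlan_get() must be balanced by a
 * batadv_orig_node_vlan_put() once the caller is done with the object:
 *
 *	vlan = batadv_orig_node_vlan_get(orig_node, vid);
 *	if (vlan) {
 *		... read or update per-VLAN state ...
 *		batadv_orig_node_vlan_put(vlan);
 *	}
 */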

/**
 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
 *  object
 * @orig_node: the originator serving the VLAN
 * @vid: the VLAN identifier
 *
 * Return: NULL in case of failure or the vlan object identified by vid and
 * belonging to orig_node otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_node_vlan *
batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
			  unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	spin_lock_bh(&orig_node->vlan_list_lock);

	/* first look if an object for this vid already exists */
	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (vlan)
		goto out;

	vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
	if (!vlan)
		goto out;

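	/* kref_init() starts the refcount at 1 for the reference held by
	 * orig_node->vlan_list; the extra kref_get() below is the reference
	 * handed back to the caller, which releases it again via
	 * batadv_orig_node_vlan_put()
	 */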
	kref_init(&vlan->refcount);
	kref_get(&vlan->refcount);
	vlan->vid = vid;

	hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);

out:
	spin_unlock_bh(&orig_node->vlan_list_lock);

	return vlan;
}

/**
 * batadv_orig_node_vlan_release - release originator-vlan object from lists
 *  and queue for free after rcu grace period
 * @ref: kref pointer of the originator-vlan object
 */
static void batadv_orig_node_vlan_release(struct kref *ref)
{
	struct batadv_orig_node_vlan *orig_vlan;

	orig_vlan = container_of(ref, struct batadv_orig_node_vlan, refcount);

	kfree_rcu(orig_vlan, rcu);
}

/**
 * batadv_orig_node_vlan_put - decrement the refcounter and possibly release
 *  the originator-vlan object
 * @orig_vlan: the originator-vlan object to release
 */
void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan)
{
	kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
}

int batadv_originator_init(struct batadv_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 0;

	bat_priv->orig_hash = batadv_hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	batadv_hash_set_lock_class(bat_priv->orig_hash,
				   &batadv_orig_hash_lock_class_key);

	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));

	return 0;

err:
	return -ENOMEM;
}
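
/* Note: batadv_purge_orig() re-queues itself every BATADV_ORIG_WORK_PERIOD
 * milliseconds, so the delayed work started in batadv_originator_init() keeps
 * running until batadv_originator_free() stops it with
 * cancel_delayed_work_sync().
 */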

/**
 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the neigh_ifinfo
 */
static void batadv_neigh_ifinfo_release(struct kref *ref)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount);

	if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_put(neigh_ifinfo->if_outgoing);

	kfree_rcu(neigh_ifinfo, rcu);
}

/**
 * batadv_neigh_ifinfo_put - decrement the refcounter and possibly release
 *  the neigh_ifinfo
 * @neigh_ifinfo: the neigh_ifinfo object to release
 */
void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo)
{
	kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release);
}

/**
 * batadv_hardif_neigh_release - release hardif neigh node from lists and
 *  queue for free after rcu grace period
 * @ref: kref pointer of the hardif neigh node
 */
static void batadv_hardif_neigh_release(struct kref *ref)
{
	struct batadv_hardif_neigh_node *hardif_neigh;

	hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node,
				    refcount);

	spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
	hlist_del_init_rcu(&hardif_neigh->list);
	spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);

	batadv_hardif_put(hardif_neigh->if_incoming);
	kfree_rcu(hardif_neigh, rcu);
}

/**
 * batadv_hardif_neigh_put - decrement the hardif neighbors refcounter
 *  and possibly release it
 * @hardif_neigh: hardif neigh neighbor to free
 */
void batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh)
{
	kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release);
}

/**
 * batadv_neigh_node_release - release neigh_node from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the neigh_node
 */
static void batadv_neigh_node_release(struct kref *ref)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_algo_ops *bao;

	neigh_node = container_of(ref, struct batadv_neigh_node, refcount);
	bao = neigh_node->orig_node->bat_priv->bat_algo_ops;

	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh_node->ifinfo_list, list) {
		batadv_neigh_ifinfo_put(neigh_ifinfo);
	}

	batadv_hardif_neigh_put(neigh_node->hardif_neigh);

	if (bao->bat_neigh_free)
		bao->bat_neigh_free(neigh_node);

	batadv_hardif_put(neigh_node->if_incoming);

	kfree_rcu(neigh_node, rcu);
}

/**
 * batadv_neigh_node_put - decrement the neighbors refcounter and possibly
 *  release it
 * @neigh_node: neigh neighbor to free
 */
void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
{
	kref_put(&neigh_node->refcount, batadv_neigh_node_release);
}

/**
 * batadv_orig_router_get - router to the originator depending on iface
 * @orig_node: the orig node for the router
 * @if_outgoing: the interface where the payload packet has been received or
 *  the OGM should be sent to
 *
 * Return: the neighbor which should be router for this orig_node/iface.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_node *
batadv_orig_router_get(struct batadv_orig_node *orig_node,
		       const struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
		if (orig_ifinfo->if_outgoing != if_outgoing)
			continue;

		router = rcu_dereference(orig_ifinfo->router);
		break;
	}

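	/* the router pointer is only guaranteed to stay valid inside the RCU
	 * read side section; grab a reference before leaving it, unless the
	 * object is already on its way to being freed (refcount hit zero)
	 */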
	if (router && !kref_get_unless_zero(&router->refcount))
		router = NULL;

	rcu_read_unlock();
	return router;
}

/**
 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: the requested orig_ifinfo or NULL if not found.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
				 list) {
		if (tmp->if_outgoing != if_outgoing)
			continue;

		if (!kref_get_unless_zero(&tmp->refcount))
			continue;

		orig_ifinfo = tmp;
		break;
	}
	rcu_read_unlock();

	return orig_ifinfo;
}

/**
 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
 * @orig_node: the orig node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: NULL in case of failure or the orig_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_orig_ifinfo *
batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
		       struct batadv_hard_iface *if_outgoing)
{
	struct batadv_orig_ifinfo *orig_ifinfo = NULL;
	unsigned long reset_time;

	spin_lock_bh(&orig_node->neigh_list_lock);

	orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
	if (orig_ifinfo)
		goto out;

	orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
	if (!orig_ifinfo)
		goto out;

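	/* a non-default ifinfo pins its outgoing hard interface; the
	 * reference taken here is dropped again in
	 * batadv_orig_ifinfo_release()
	 */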
	if (if_outgoing != BATADV_IF_DEFAULT &&
	    !kref_get_unless_zero(&if_outgoing->refcount)) {
		kfree(orig_ifinfo);
		orig_ifinfo = NULL;
		goto out;
	}

	reset_time = jiffies - 1;
	reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_ifinfo->batman_seqno_reset = reset_time;
	orig_ifinfo->if_outgoing = if_outgoing;
	INIT_HLIST_NODE(&orig_ifinfo->list);
	kref_init(&orig_ifinfo->refcount);
	kref_get(&orig_ifinfo->refcount);
	hlist_add_head_rcu(&orig_ifinfo->list,
			   &orig_node->ifinfo_list);
out:
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return orig_ifinfo;
}

/**
 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * The object is returned with refcounter increased by 1.
 *
 * Return: the requested neigh_ifinfo or NULL if not found
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
				   *tmp_neigh_ifinfo;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
				 list) {
		if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
			continue;

		if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount))
			continue;

		neigh_ifinfo = tmp_neigh_ifinfo;
		break;
	}
	rcu_read_unlock();

	return neigh_ifinfo;
}

/**
 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
 * @neigh: the neigh node to be queried
 * @if_outgoing: the interface for which the ifinfo should be acquired
 *
 * Return: NULL in case of failure or the neigh_ifinfo object for the
 * if_outgoing interface otherwise. The object is created and added to the
 * list if it does not exist.
 *
 * The object is returned with refcounter increased by 1.
 */
struct batadv_neigh_ifinfo *
batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
			struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;

	spin_lock_bh(&neigh->ifinfo_lock);

	neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
	if (neigh_ifinfo)
		goto out;

	neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
	if (!neigh_ifinfo)
		goto out;

	if (if_outgoing && !kref_get_unless_zero(&if_outgoing->refcount)) {
		kfree(neigh_ifinfo);
		neigh_ifinfo = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_ifinfo->list);
	kref_init(&neigh_ifinfo->refcount);
	kref_get(&neigh_ifinfo->refcount);
	neigh_ifinfo->if_outgoing = if_outgoing;

	hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);

out:
	spin_unlock_bh(&neigh->ifinfo_lock);

	return neigh_ifinfo;
}

/**
 * batadv_neigh_node_get - retrieve a neighbour from the list
 * @orig_node: originator which the neighbour belongs to
 * @hard_iface: the interface where this neighbour is connected to
 * @addr: the address of the neighbour
 *
 * Looks for and possibly returns a neighbour belonging to this originator list
 * which is connected through the provided hard interface.
 *
 * Return: neighbor when found. Otherwise NULL
Marek Lindnered292662015-08-04 23:31:44 +0800487 */
488static struct batadv_neigh_node *
489batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
490 const struct batadv_hard_iface *hard_iface,
491 const u8 *addr)
492{
493 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
494
495 rcu_read_lock();
496 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
497 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
498 continue;
499
500 if (tmp_neigh_node->if_incoming != hard_iface)
501 continue;
502
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100503 if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
Marek Lindnered292662015-08-04 23:31:44 +0800504 continue;
505
506 res = tmp_neigh_node;
507 break;
508 }
509 rcu_read_unlock();
510
511 return res;
512}
513
514/**
Marek Lindnercef63412015-08-04 21:09:55 +0800515 * batadv_hardif_neigh_create - create a hardif neighbour node
516 * @hard_iface: the interface this neighbour is connected to
517 * @neigh_addr: the interface address of the neighbour to retrieve
518 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200519 * Return: the hardif neighbour node if found or created or NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800520 */
521static struct batadv_hardif_neigh_node *
522batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
523 const u8 *neigh_addr)
524{
Marek Lindner8248a4c2015-08-04 21:09:56 +0800525 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Marek Lindnercef63412015-08-04 21:09:55 +0800526 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
527
528 spin_lock_bh(&hard_iface->neigh_list_lock);
529
530 /* check if neighbor hasn't been added in the meantime */
531 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
532 if (hardif_neigh)
533 goto out;
534
Sven Eckelmann7a659d52016-01-16 10:29:54 +0100535 if (!kref_get_unless_zero(&hard_iface->refcount))
Marek Lindnercef63412015-08-04 21:09:55 +0800536 goto out;
537
538 hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
539 if (!hardif_neigh) {
Sven Eckelmann82047ad2016-01-17 11:01:10 +0100540 batadv_hardif_put(hard_iface);
Marek Lindnercef63412015-08-04 21:09:55 +0800541 goto out;
542 }
543
544 INIT_HLIST_NODE(&hardif_neigh->list);
545 ether_addr_copy(hardif_neigh->addr, neigh_addr);
546 hardif_neigh->if_incoming = hard_iface;
547 hardif_neigh->last_seen = jiffies;
548
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100549 kref_init(&hardif_neigh->refcount);
Marek Lindnercef63412015-08-04 21:09:55 +0800550
Marek Lindner8248a4c2015-08-04 21:09:56 +0800551 if (bat_priv->bat_algo_ops->bat_hardif_neigh_init)
552 bat_priv->bat_algo_ops->bat_hardif_neigh_init(hardif_neigh);
553
Marek Lindnercef63412015-08-04 21:09:55 +0800554 hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
555
556out:
557 spin_unlock_bh(&hard_iface->neigh_list_lock);
558 return hardif_neigh;
559}
560
561/**
562 * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
563 * node
564 * @hard_iface: the interface this neighbour is connected to
565 * @neigh_addr: the interface address of the neighbour to retrieve
566 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200567 * Return: the hardif neighbour node if found or created or NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800568 */
569static struct batadv_hardif_neigh_node *
570batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
571 const u8 *neigh_addr)
572{
573 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
574
575 /* first check without locking to avoid the overhead */
576 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
577 if (hardif_neigh)
578 return hardif_neigh;
579
580 return batadv_hardif_neigh_create(hard_iface, neigh_addr);
581}
582
583/**
584 * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
585 * @hard_iface: the interface where this neighbour is connected to
586 * @neigh_addr: the address of the neighbour
587 *
588 * Looks for and possibly returns a neighbour belonging to this hard interface.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200589 *
 * Return: neighbor when found. Otherwise NULL
 */
struct batadv_hardif_neigh_node *
batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
			const u8 *neigh_addr)
{
	struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_hardif_neigh,
				 &hard_iface->neigh_list, list) {
		if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
			continue;

		if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount))
			continue;

		hardif_neigh = tmp_hardif_neigh;
		break;
	}
	rcu_read_unlock();

	return hardif_neigh;
}

/**
 * batadv_neigh_node_new - create and init a new neigh_node object
 * @orig_node: originator object representing the neighbour
 * @hard_iface: the interface where the neighbour is connected to
 * @neigh_addr: the mac address of the neighbour interface
 *
 * Allocates a new neigh_node object and initialises all the generic fields.
 *
 * Return: the neighbour node if found or created, NULL otherwise
 */
struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_orig_node *orig_node,
		      struct batadv_hard_iface *hard_iface,
		      const u8 *neigh_addr)
{
	struct batadv_neigh_node *neigh_node;
	struct batadv_hardif_neigh_node *hardif_neigh = NULL;

	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
	if (neigh_node)
		goto out;

	hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
							 neigh_addr);
	if (!hardif_neigh)
		goto out;

	neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		goto out;

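	/* the new neighbor pins its incoming hard interface; the reference is
	 * dropped again in batadv_neigh_node_release()
	 */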
	if (!kref_get_unless_zero(&hard_iface->refcount)) {
		kfree(neigh_node);
		neigh_node = NULL;
		goto out;
	}

	INIT_HLIST_NODE(&neigh_node->list);
	INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
	spin_lock_init(&neigh_node->ifinfo_lock);

	ether_addr_copy(neigh_node->addr, neigh_addr);
	neigh_node->if_incoming = hard_iface;
	neigh_node->orig_node = orig_node;
	neigh_node->last_seen = jiffies;

	/* increment unique neighbor refcount */
	kref_get(&hardif_neigh->refcount);
	neigh_node->hardif_neigh = hardif_neigh;

	/* extra reference for return */
	kref_init(&neigh_node->refcount);
	kref_get(&neigh_node->refcount);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

	batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
		   "Creating new neighbor %pM for orig_node %pM on interface %s\n",
		   neigh_addr, orig_node->orig, hard_iface->net_dev->name);

out:
	if (hardif_neigh)
		batadv_hardif_neigh_put(hardif_neigh);
	return neigh_node;
}

/**
 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
 * @seq: neighbour table seq_file struct
 * @offset: not used
 *
 * Return: always 0
 */
int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_put(primary_if);

	if (!bat_priv->bat_algo_ops->bat_neigh_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_neigh_print(bat_priv, seq);
	return 0;
}

/**
 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the orig_ifinfo
 */
static void batadv_orig_ifinfo_release(struct kref *ref)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_neigh_node *router;

	orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);

	if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
		batadv_hardif_put(orig_ifinfo->if_outgoing);

	/* this is the last reference to this object */
	router = rcu_dereference_protected(orig_ifinfo->router, true);
	if (router)
		batadv_neigh_node_put(router);

	kfree_rcu(orig_ifinfo, rcu);
}

/**
 * batadv_orig_ifinfo_put - decrement the refcounter and possibly release
 *  the orig_ifinfo
 * @orig_ifinfo: the orig_ifinfo object to release
 */
void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo)
{
	kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release);
}

/**
 * batadv_orig_node_free_rcu - free the orig_node
 * @rcu: rcu pointer of the orig_node
 */
static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
{
	struct batadv_orig_node *orig_node;

	orig_node = container_of(rcu, struct batadv_orig_node, rcu);

	batadv_mcast_purge_orig(orig_node);

	batadv_frag_purge_orig(orig_node, NULL);

	if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
		orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);

	kfree(orig_node->tt_buff);
	kfree(orig_node);
}

/**
 * batadv_orig_node_release - release orig_node from lists and queue for
 *  free after rcu grace period
 * @ref: kref pointer of the orig_node
 */
static void batadv_orig_node_release(struct kref *ref)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	struct batadv_orig_node *orig_node;
	struct batadv_orig_ifinfo *orig_ifinfo;

	orig_node = container_of(ref, struct batadv_orig_node, refcount);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		batadv_neigh_node_put(neigh_node);
	}

	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_put(orig_ifinfo);
	}
	spin_unlock_bh(&orig_node->neigh_list_lock);

	/* Free nc_nodes */
	batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);

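	/* defer the final kfree() to batadv_orig_node_free_rcu() so that
	 * concurrent RCU readers which still hold a pointer to this
	 * originator never see freed memory
	 */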
	call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
}

/**
 * batadv_orig_node_put - decrement the orig node refcounter and possibly
 *  release it
 * @orig_node: the orig node to free
 */
void batadv_orig_node_put(struct batadv_orig_node *orig_node)
{
	kref_put(&orig_node->refcount, batadv_orig_node_release);
}

void batadv_originator_free(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			hlist_del_rcu(&orig_node->hash_entry);
			batadv_orig_node_put(orig_node);
		}
		spin_unlock_bh(list_lock);
	}

	batadv_hash_destroy(hash);
}

/**
 * batadv_orig_node_new - creates a new orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the mac address of the originator
 *
 * Creates a new originator object and initialises all the generic fields.
 * The new object is not added to the originator list.
 *
 * Return: the newly created object or NULL on failure.
 */
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
					      const u8 *addr)
{
	struct batadv_orig_node *orig_node;
	struct batadv_orig_node_vlan *vlan;
	unsigned long reset_time;
	int i;

	batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
		   "Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	INIT_HLIST_HEAD(&orig_node->vlan_list);
	INIT_HLIST_HEAD(&orig_node->ifinfo_list);
	spin_lock_init(&orig_node->bcast_seqno_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	spin_lock_init(&orig_node->tt_buff_lock);
	spin_lock_init(&orig_node->tt_lock);
	spin_lock_init(&orig_node->vlan_list_lock);

	batadv_nc_init_orig(orig_node);

	/* extra reference for return */
	kref_init(&orig_node->refcount);
	kref_get(&orig_node->refcount);

	orig_node->bat_priv = bat_priv;
	ether_addr_copy(orig_node->orig, addr);
	batadv_dat_init_orig_node_addr(orig_node);
	atomic_set(&orig_node->last_ttvn, 0);
	orig_node->tt_buff = NULL;
	orig_node->tt_buff_len = 0;
	orig_node->last_seen = jiffies;
	reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
	orig_node->bcast_seqno_reset = reset_time;

#ifdef CONFIG_BATMAN_ADV_MCAST
	orig_node->mcast_flags = BATADV_NO_FLAGS;
	INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
	INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
	spin_lock_init(&orig_node->mcast_handler_lock);
#endif

	/* create a vlan object for the "untagged" LAN */
	vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
	if (!vlan)
		goto free_orig_node;
	/* batadv_orig_node_vlan_new() increases the refcounter.
	 * Immediately release vlan since it is not needed anymore in this
	 * context
	 */
	batadv_orig_node_vlan_put(vlan);

	for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
		INIT_HLIST_HEAD(&orig_node->fragments[i].head);
		spin_lock_init(&orig_node->fragments[i].lock);
		orig_node->fragments[i].size = 0;
	}

	return orig_node;
free_orig_node:
	kfree(orig_node);
	return NULL;
}
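
/* Illustrative caller sketch (assumed, not taken from this file): routing
 * code is expected to look the originator up in the orig_hash first and only
 * call batadv_orig_node_new() on a miss, then insert the new node into the
 * hash and drop the returned reference with batadv_orig_node_put() when it is
 * no longer needed:
 *
 *	orig_node = batadv_orig_node_new(bat_priv, ethhdr->h_source);
 *	if (!orig_node)
 *		return NULL;
 *	...
 *	batadv_orig_node_put(orig_node);
 */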

/**
 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
 * @bat_priv: the bat priv with all the soft interface information
 * @neigh: neigh node which is to be checked
 */
static void
batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
			  struct batadv_neigh_node *neigh)
{
	struct batadv_neigh_ifinfo *neigh_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;

	spin_lock_bh(&neigh->ifinfo_lock);

	/* for all ifinfo objects for this neighbor */
	hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
				  &neigh->ifinfo_list, list) {
		if_outgoing = neigh_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
			   neigh->addr, if_outgoing->net_dev->name);

		hlist_del_rcu(&neigh_ifinfo->list);
		batadv_neigh_ifinfo_put(neigh_ifinfo);
	}

	spin_unlock_bh(&neigh->ifinfo_lock);
}

/**
 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Return: true if any ifinfo entry was purged, false otherwise.
 */
static bool
batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
			 struct batadv_orig_node *orig_node)
{
	struct batadv_orig_ifinfo *orig_ifinfo;
	struct batadv_hard_iface *if_outgoing;
	struct hlist_node *node_tmp;
	bool ifinfo_purged = false;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all ifinfo objects for this originator */
	hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
				  &orig_node->ifinfo_list, list) {
		if_outgoing = orig_ifinfo->if_outgoing;

		/* always keep the default interface */
		if (if_outgoing == BATADV_IF_DEFAULT)
			continue;

		/* don't purge if the interface is not (going) down */
		if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
		    (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
		    (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
			continue;

		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "router/ifinfo purge: originator %pM, iface: %s\n",
			   orig_node->orig, if_outgoing->net_dev->name);

		ifinfo_purged = true;

		hlist_del_rcu(&orig_ifinfo->list);
		batadv_orig_ifinfo_put(orig_ifinfo);
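		/* the orig node holds an extra reference on the ifinfo it
		 * selected as last_bonding_candidate; drop that reference as
		 * well when the candidate is purged
		 */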
		if (orig_node->last_bonding_candidate == orig_ifinfo) {
			orig_node->last_bonding_candidate = NULL;
			batadv_orig_ifinfo_put(orig_ifinfo);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	return ifinfo_purged;
}

/**
 * batadv_purge_orig_neighbors - purges neighbors from originator
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * Return: true if any neighbor was purged, false otherwise
 */
static bool
batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
			    struct batadv_orig_node *orig_node)
{
	struct hlist_node *node_tmp;
	struct batadv_neigh_node *neigh_node;
	bool neigh_purged = false;
	unsigned long last_seen;
	struct batadv_hard_iface *if_incoming;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node_tmp,
				  &orig_node->neigh_list, list) {
		last_seen = neigh_node->last_seen;
		if_incoming = neigh_node->if_incoming;

		if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
		    (if_incoming->if_status == BATADV_IF_INACTIVE) ||
		    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
		    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
			if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
			    (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
			    (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
					   orig_node->orig, neigh_node->addr,
					   if_incoming->net_dev->name);
			else
				batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
					   "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
					   orig_node->orig, neigh_node->addr,
					   jiffies_to_msecs(last_seen));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			batadv_neigh_node_put(neigh_node);
		} else {
			/* only necessary if not the whole neighbor is to be
			 * deleted, but some interface has been removed.
			 */
			batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

/**
 * batadv_find_best_neighbor - finds the best neighbor after purging
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 * @if_outgoing: the interface for which the metric should be compared
 *
 * Return: the current best neighbor, with refcount increased.
 */
static struct batadv_neigh_node *
batadv_find_best_neighbor(struct batadv_priv *bat_priv,
			  struct batadv_orig_node *orig_node,
			  struct batadv_hard_iface *if_outgoing)
{
	struct batadv_neigh_node *best = NULL, *neigh;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;

	rcu_read_lock();
	hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
		if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
						best, if_outgoing) <= 0))
			continue;

		if (!kref_get_unless_zero(&neigh->refcount))
			continue;

		if (best)
			batadv_neigh_node_put(best);

		best = neigh;
	}
	rcu_read_unlock();

	return best;
}

/**
 * batadv_purge_orig_node - purges obsolete information from an orig_node
 * @bat_priv: the bat priv with all the soft interface information
 * @orig_node: orig node which is to be checked
 *
 * This function checks if the orig_node or substructures of it have become
 * obsolete, and purges this information if that's the case.
 *
 * Return: true if the orig_node is to be removed, false otherwise.
 */
static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
				   struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *best_neigh_node;
	struct batadv_hard_iface *hard_iface;
	bool changed_ifinfo, changed_neigh;

	if (batadv_has_timed_out(orig_node->last_seen,
				 2 * BATADV_PURGE_TIMEOUT)) {
		batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
			   "Originator timeout: originator %pM, last_seen %u\n",
			   orig_node->orig,
			   jiffies_to_msecs(orig_node->last_seen));
		return true;
	}
	changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
	changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);

	if (!changed_ifinfo && !changed_neigh)
		return false;

	/* first for the default interface (BATADV_IF_DEFAULT, i.e. NULL) ... */
	best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
						    BATADV_IF_DEFAULT);
	batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
			    best_neigh_node);
	if (best_neigh_node)
		batadv_neigh_node_put(best_neigh_node);

	/* ... then for all other interfaces. */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
		if (hard_iface->if_status != BATADV_IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != bat_priv->soft_iface)
			continue;

		best_neigh_node = batadv_find_best_neighbor(bat_priv,
							    orig_node,
							    hard_iface);
		batadv_update_route(bat_priv, orig_node, hard_iface,
				    best_neigh_node);
		if (best_neigh_node)
			batadv_neigh_node_put(best_neigh_node);
	}
	rcu_read_unlock();

	return false;
}

static void _batadv_purge_orig(struct batadv_priv *bat_priv)
{
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct batadv_orig_node *orig_node;
	u32 i;

	if (!hash)
		return;

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(orig_node, node_tmp,
					  head, hash_entry) {
			if (batadv_purge_orig_node(bat_priv, orig_node)) {
				batadv_gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(&orig_node->hash_entry);
				batadv_tt_global_del_orig(orig_node->bat_priv,
							  orig_node, -1,
							  "originator timed out");
				batadv_orig_node_put(orig_node);
				continue;
			}

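			/* the orig_node survives this purge round: only drop
			 * its expired fragment chains
			 */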
			batadv_frag_purge_orig(orig_node,
					       batadv_frag_check_entry);
		}
		spin_unlock_bh(list_lock);
	}

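	/* purging may have removed the currently selected gateway, so let the
	 * gateway code elect a new one
	 */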
	batadv_gw_election(bat_priv);
}

static void batadv_purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work;
	struct batadv_priv *bat_priv;

	delayed_work = container_of(work, struct delayed_work, work);
	bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
	_batadv_purge_orig(bat_priv);
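	/* re-arm the purge timer: this work item keeps re-queueing itself
	 * every BATADV_ORIG_WORK_PERIOD milliseconds
	 */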
	queue_delayed_work(batadv_event_workqueue,
			   &bat_priv->orig_work,
			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
}

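/* Illustrative sketch (assumption; the setup code is not shown in this
 * excerpt): the periodic purge is expected to be armed once when the
 * originator table is initialized, roughly like
 *
 *	INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
 *	queue_delayed_work(batadv_event_workqueue, &bat_priv->orig_work,
 *			   msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
 *
 * after which batadv_purge_orig() above re-queues itself, while
 * batadv_purge_orig_ref() below lets other code run an immediate,
 * synchronous purge pass.
 */
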
void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
{
	_batadv_purge_orig(bat_priv);
}

int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_priv *bat_priv = netdev_priv(net_dev);
	struct batadv_hard_iface *primary_if;

	primary_if = batadv_seq_print_text_primary_if_get(seq);
	if (!primary_if)
		return 0;

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, primary_if->net_dev->name,
		   primary_if->net_dev->dev_addr, net_dev->name,
		   bat_priv->bat_algo_ops->name);

	batadv_hardif_put(primary_if);

	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		return 0;
	}

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
					       BATADV_IF_DEFAULT);

	return 0;
}

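/* Illustrative sketch (assumption; not part of this file): the table printed
 * by batadv_orig_seq_print_text() is typically exposed through a debugfs
 * seq_file wrapper along these lines, with the soft interface's net_device
 * stashed in the inode private data:
 *
 *	static int batadv_originators_open(struct inode *inode,
 *					   struct file *file)
 *	{
 *		struct net_device *net_dev = inode->i_private;
 *
 *		return single_open(file, batadv_orig_seq_print_text, net_dev);
 *	}
 */
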
/**
 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
 *  outgoing interface
 * @seq: debugfs table seq_file struct
 * @offset: not used
 *
 * Return: 0
 */
int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct batadv_hard_iface *hard_iface;
	struct batadv_priv *bat_priv;

	hard_iface = batadv_hardif_get_by_netdev(net_dev);

	if (!hard_iface || !hard_iface->soft_iface) {
		seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
		goto out;
	}

	bat_priv = netdev_priv(hard_iface->soft_iface);
	if (!bat_priv->bat_algo_ops->bat_orig_print) {
		seq_puts(seq,
			 "No printing function for this routing protocol\n");
		goto out;
	}

	if (hard_iface->if_status != BATADV_IF_ACTIVE) {
		seq_puts(seq, "Interface not active\n");
		goto out;
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
		   BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
		   hard_iface->net_dev->dev_addr,
		   hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);

	bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);

out:
	if (hard_iface)
		batadv_hardif_put(hard_iface);
	return 0;
}

int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_orig_node *orig_node;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
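			/* the actual resize of the per-originator data is
			 * delegated to the routing algorithm; algorithms
			 * without per-interface state may leave the hook
			 * unset
			 */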
			if (bao->bat_orig_add_if)
				ret = bao->bat_orig_add_if(orig_node,
							   max_if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}

int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
			    int max_if_num)
{
	struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_head *head;
	struct batadv_hard_iface *hard_iface_tmp;
	struct batadv_orig_node *orig_node;
	struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
	u32 i;
	int ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
			ret = 0;
			if (bao->bat_orig_del_if)
				ret = bao->bat_orig_del_if(orig_node,
							   max_if_num,
							   hard_iface->if_num);
			if (ret == -ENOMEM)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber the remaining batman interfaces so that the if_num values
	 * stay contiguous
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
		if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
			continue;

		if (hard_iface == hard_iface_tmp)
			continue;

		if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
			continue;

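		/* every interface numbered behind the removed one moves down
		 * by one slot so that the if_num range stays dense
		 */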
		if (hard_iface_tmp->if_num > hard_iface->if_num)
			hard_iface_tmp->if_num--;
	}
	rcu_read_unlock();

	hard_iface->if_num = -1;
	return 0;

err:
	rcu_read_unlock();
	return -ENOMEM;
}