blob: 0792de869f4ecfb0b5a650f158de32d32ed8a6a4
Sven Eckelmann0046b042016-01-01 00:01:03 +01001/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00002 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
Antonio Quartulliebf38fb2013-11-03 20:40:48 +010015 * along with this program; if not, see <http://www.gnu.org/licenses/>.
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000016 */
17
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000018#include "originator.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020019#include "main.h"
20
Sven Eckelmann7c124392016-01-16 10:29:56 +010021#include <linux/atomic.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020022#include <linux/errno.h>
23#include <linux/etherdevice.h>
24#include <linux/fs.h>
25#include <linux/jiffies.h>
26#include <linux/kernel.h>
Sven Eckelmann90f564d2016-01-16 10:29:40 +010027#include <linux/kref.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020028#include <linux/list.h>
29#include <linux/lockdep.h>
30#include <linux/netdevice.h>
Matthias Schiffer85cf8c82016-07-03 13:31:39 +020031#include <linux/netlink.h>
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080032#include <linux/rculist.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020033#include <linux/seq_file.h>
Matthias Schiffer85cf8c82016-07-03 13:31:39 +020034#include <linux/skbuff.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020035#include <linux/slab.h>
36#include <linux/spinlock.h>
37#include <linux/workqueue.h>
Matthias Schiffer85cf8c82016-07-03 13:31:39 +020038#include <net/sock.h>
39#include <uapi/linux/batman_adv.h>
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020040
Sven Eckelmann01d350d2016-05-15 11:07:44 +020041#include "bat_algo.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020042#include "distributed-arp-table.h"
43#include "fragmentation.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000044#include "gateway_client.h"
45#include "hard-interface.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020046#include "hash.h"
Sven Eckelmannba412082016-05-15 23:48:31 +020047#include "log.h"
Linus Lüssing60432d72014-02-15 17:47:51 +010048#include "multicast.h"
Matthias Schiffer85cf8c82016-07-03 13:31:39 +020049#include "netlink.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020050#include "network-coding.h"
51#include "routing.h"
Matthias Schiffer85cf8c82016-07-03 13:31:39 +020052#include "soft-interface.h"
Sven Eckelmann1e2c2a42015-04-17 19:40:28 +020053#include "translation-table.h"
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000054
Antonio Quartullidec05072012-11-10 11:00:32 +010055/* hash class keys */
56static struct lock_class_key batadv_orig_hash_lock_class_key;
57
Sven Eckelmann03fc7f82012-05-12 18:34:00 +020058static void batadv_purge_orig(struct work_struct *work);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +000059
Sven Eckelmann62fe7102015-09-15 19:00:48 +020060/**
Sven Eckelmann7afcbbe2015-10-31 12:29:29 +010061 * batadv_compare_orig - comparison function used in the originator hash table
62 * @node: node in the local table
63 * @data2: second object to compare the node to
Sven Eckelmann62fe7102015-09-15 19:00:48 +020064 *
Sven Eckelmann4b426b12016-02-22 21:02:39 +010065 * Return: true if they are the same originator
Sven Eckelmann62fe7102015-09-15 19:00:48 +020066 */
Sven Eckelmann4b426b12016-02-22 21:02:39 +010067bool batadv_compare_orig(const struct hlist_node *node, const void *data2)
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020068{
Sven Eckelmann56303d32012-06-05 22:31:31 +020069 const void *data1 = container_of(node, struct batadv_orig_node,
70 hash_entry);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020071
dingtianhong323813e2013-12-26 19:40:39 +080072 return batadv_compare_eth(data1, data2);
Sven Eckelmannb8e2dd12011-06-15 15:08:59 +020073}
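/* Editor's sketch (not part of the original file): batadv_compare_orig()
 * is meant to be used as a hash table compare callback, so its first
 * argument is the hash_entry hlist_node embedded in a batadv_orig_node.
 * A direct call therefore looks like the hypothetical helper below.
 */
static bool __maybe_unused
batadv_example_orig_matches(struct batadv_orig_node *orig_node,
			    const u8 *addr)
{
	/* compare the originator's MAC address against addr */
	return batadv_compare_orig(&orig_node->hash_entry, addr);
}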
74
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020075/**
76 * batadv_orig_node_vlan_get - get an orig_node_vlan object
77 * @orig_node: the originator serving the VLAN
78 * @vid: the VLAN identifier
79 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +020080 * Return: the vlan object identified by vid and belonging to orig_node or NULL
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020081 * if it does not exist.
82 */
83struct batadv_orig_node_vlan *
84batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
85 unsigned short vid)
86{
87 struct batadv_orig_node_vlan *vlan = NULL, *tmp;
88
89 rcu_read_lock();
Marek Lindnerd0fa4f32015-06-22 00:30:22 +080090 hlist_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020091 if (tmp->vid != vid)
92 continue;
93
Sven Eckelmann161a3be2016-01-16 10:29:55 +010094 if (!kref_get_unless_zero(&tmp->refcount))
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +020095 continue;
96
97 vlan = tmp;
98
99 break;
100 }
101 rcu_read_unlock();
102
103 return vlan;
104}
105
106/**
107 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
108 * object
109 * @orig_node: the originator serving the VLAN
110 * @vid: the VLAN identifier
111 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200112 * Return: NULL in case of failure or the vlan object identified by vid and
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200113 * belonging to orig_node otherwise. The object is created and added to the list
114 * if it does not exist.
115 *
116 * The object is returned with refcounter increased by 1.
117 */
118struct batadv_orig_node_vlan *
119batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
120 unsigned short vid)
121{
122 struct batadv_orig_node_vlan *vlan;
123
124 spin_lock_bh(&orig_node->vlan_list_lock);
125
126 /* first look if an object for this vid already exists */
127 vlan = batadv_orig_node_vlan_get(orig_node, vid);
128 if (vlan)
129 goto out;
130
131 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
132 if (!vlan)
133 goto out;
134
Sven Eckelmann161a3be2016-01-16 10:29:55 +0100135 kref_init(&vlan->refcount);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200136 vlan->vid = vid;
137
Sven Eckelmann09537d12016-07-15 17:39:16 +0200138 kref_get(&vlan->refcount);
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800139 hlist_add_head_rcu(&vlan->list, &orig_node->vlan_list);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200140
141out:
142 spin_unlock_bh(&orig_node->vlan_list_lock);
143
144 return vlan;
145}
146
147/**
Sven Eckelmann161a3be2016-01-16 10:29:55 +0100148 * batadv_orig_node_vlan_release - release originator-vlan object from lists
149 * and queue for free after rcu grace period
150 * @ref: kref pointer of the originator-vlan object
151 */
152static void batadv_orig_node_vlan_release(struct kref *ref)
153{
154 struct batadv_orig_node_vlan *orig_vlan;
155
156 orig_vlan = container_of(ref, struct batadv_orig_node_vlan, refcount);
157
158 kfree_rcu(orig_vlan, rcu);
159}
160
161/**
Sven Eckelmann21754e22016-01-17 11:01:24 +0100162 * batadv_orig_node_vlan_put - decrement the refcounter and possibly release
163 * the originator-vlan object
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200164 * @orig_vlan: the originator-vlan object to release
165 */
Sven Eckelmann21754e22016-01-17 11:01:24 +0100166void batadv_orig_node_vlan_put(struct batadv_orig_node_vlan *orig_vlan)
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200167{
Sven Eckelmann161a3be2016-01-16 10:29:55 +0100168 kref_put(&orig_vlan->refcount, batadv_orig_node_vlan_release);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200169}
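/* Editor's sketch (not part of the original file): the vlan helpers above
 * follow the usual kref contract - every batadv_orig_node_vlan_get() or
 * _new() that returns non-NULL must be balanced by a
 * batadv_orig_node_vlan_put(). The helper name below is hypothetical.
 */
static bool __maybe_unused
batadv_example_orig_serves_vlan(struct batadv_orig_node *orig_node,
				unsigned short vid)
{
	struct batadv_orig_node_vlan *vlan;

	vlan = batadv_orig_node_vlan_get(orig_node, vid);
	if (!vlan)
		return false;

	/* ... use the vlan object while the reference is held ... */

	batadv_orig_node_vlan_put(vlan);
	return true;
}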
170
Sven Eckelmann56303d32012-06-05 22:31:31 +0200171int batadv_originator_init(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000172{
173 if (bat_priv->orig_hash)
Sven Eckelmann5346c352012-05-05 13:27:28 +0200174 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000175
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200176 bat_priv->orig_hash = batadv_hash_new(1024);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000177
178 if (!bat_priv->orig_hash)
179 goto err;
180
Antonio Quartullidec05072012-11-10 11:00:32 +0100181 batadv_hash_set_lock_class(bat_priv->orig_hash,
182 &batadv_orig_hash_lock_class_key);
183
Antonio Quartulli72414442012-12-25 13:14:37 +0100184 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
185 queue_delayed_work(batadv_event_workqueue,
186 &bat_priv->orig_work,
187 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
188
Sven Eckelmann5346c352012-05-05 13:27:28 +0200189 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000190
191err:
Sven Eckelmann5346c352012-05-05 13:27:28 +0200192 return -ENOMEM;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000193}
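/* Editor's sketch (not part of the original file): batadv_originator_init()
 * is paired with batadv_originator_free() (defined further below and
 * declared in originator.h); a mesh setup path would typically roll back on
 * a later error as outlined here. The function name is hypothetical.
 */
static int __maybe_unused
batadv_example_setup_orig_table(struct batadv_priv *bat_priv)
{
	int ret;

	ret = batadv_originator_init(bat_priv);
	if (ret < 0)
		return ret;

	/* ... set up further tables; if one of them fails, undo with
	 * batadv_originator_free(bat_priv) before propagating the error ...
	 */

	return 0;
}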
194
Simon Wunderlich89652332013-11-13 19:14:46 +0100195/**
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100196 * batadv_neigh_ifinfo_release - release neigh_ifinfo from lists and queue for
197 * free after rcu grace period
Sven Eckelmann962c6832016-01-16 10:29:51 +0100198 * @ref: kref pointer of the neigh_ifinfo
Simon Wunderlich89652332013-11-13 19:14:46 +0100199 */
Sven Eckelmann962c6832016-01-16 10:29:51 +0100200static void batadv_neigh_ifinfo_release(struct kref *ref)
Simon Wunderlich89652332013-11-13 19:14:46 +0100201{
Sven Eckelmann962c6832016-01-16 10:29:51 +0100202 struct batadv_neigh_ifinfo *neigh_ifinfo;
203
204 neigh_ifinfo = container_of(ref, struct batadv_neigh_ifinfo, refcount);
205
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100206 if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
Sven Eckelmann82047ad2016-01-17 11:01:10 +0100207 batadv_hardif_put(neigh_ifinfo->if_outgoing);
Sven Eckelmannae3e1e32016-01-05 12:06:24 +0100208
209 kfree_rcu(neigh_ifinfo, rcu);
Simon Wunderlich89652332013-11-13 19:14:46 +0100210}
211
212/**
Sven Eckelmann044fa3a2016-01-17 11:01:12 +0100213 * batadv_neigh_ifinfo_put - decrement the refcounter and possibly release
Simon Wunderlich89652332013-11-13 19:14:46 +0100214 * the neigh_ifinfo
215 * @neigh_ifinfo: the neigh_ifinfo object to release
216 */
Sven Eckelmann044fa3a2016-01-17 11:01:12 +0100217void batadv_neigh_ifinfo_put(struct batadv_neigh_ifinfo *neigh_ifinfo)
Simon Wunderlich89652332013-11-13 19:14:46 +0100218{
Sven Eckelmann962c6832016-01-16 10:29:51 +0100219 kref_put(&neigh_ifinfo->refcount, batadv_neigh_ifinfo_release);
Simon Wunderlich89652332013-11-13 19:14:46 +0100220}
221
222/**
Sven Eckelmannf6389692016-01-05 12:06:23 +0100223 * batadv_hardif_neigh_release - release hardif neigh node from lists and
224 * queue for free after rcu grace period
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100225 * @ref: kref pointer of the neigh_node
Marek Lindnercef63412015-08-04 21:09:55 +0800226 */
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100227static void batadv_hardif_neigh_release(struct kref *ref)
Marek Lindnercef63412015-08-04 21:09:55 +0800228{
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100229 struct batadv_hardif_neigh_node *hardif_neigh;
230
231 hardif_neigh = container_of(ref, struct batadv_hardif_neigh_node,
232 refcount);
233
Sven Eckelmannf6389692016-01-05 12:06:23 +0100234 spin_lock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
235 hlist_del_init_rcu(&hardif_neigh->list);
236 spin_unlock_bh(&hardif_neigh->if_incoming->neigh_list_lock);
Sven Eckelmannbab7c6c2016-01-05 12:06:17 +0100237
Sven Eckelmann82047ad2016-01-17 11:01:10 +0100238 batadv_hardif_put(hardif_neigh->if_incoming);
Sven Eckelmannf6389692016-01-05 12:06:23 +0100239 kfree_rcu(hardif_neigh, rcu);
Marek Lindnercef63412015-08-04 21:09:55 +0800240}
241
242/**
Sven Eckelmannaccadc32016-01-17 11:01:14 +0100243 * batadv_hardif_neigh_put - decrement the hardif neighbor's refcounter
Sven Eckelmannf6389692016-01-05 12:06:23 +0100244 * and possibly release it
Marek Lindnercef63412015-08-04 21:09:55 +0800245 * @hardif_neigh: hardif neigh neighbor to free
246 */
Sven Eckelmannaccadc32016-01-17 11:01:14 +0100247void batadv_hardif_neigh_put(struct batadv_hardif_neigh_node *hardif_neigh)
Marek Lindnercef63412015-08-04 21:09:55 +0800248{
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100249 kref_put(&hardif_neigh->refcount, batadv_hardif_neigh_release);
Marek Lindnercef63412015-08-04 21:09:55 +0800250}
251
252/**
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100253 * batadv_neigh_node_release - release neigh_node from lists and queue for
254 * free after rcu grace period
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100255 * @ref: kref pointer of the neigh_node
Simon Wunderlich89652332013-11-13 19:14:46 +0100256 */
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100257static void batadv_neigh_node_release(struct kref *ref)
Simon Wunderlich89652332013-11-13 19:14:46 +0100258{
259 struct hlist_node *node_tmp;
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100260 struct batadv_neigh_node *neigh_node;
Simon Wunderlich89652332013-11-13 19:14:46 +0100261 struct batadv_neigh_ifinfo *neigh_ifinfo;
262
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100263 neigh_node = container_of(ref, struct batadv_neigh_node, refcount);
Simon Wunderlich89652332013-11-13 19:14:46 +0100264
265 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
266 &neigh_node->ifinfo_list, list) {
Sven Eckelmann044fa3a2016-01-17 11:01:12 +0100267 batadv_neigh_ifinfo_put(neigh_ifinfo);
Simon Wunderlich89652332013-11-13 19:14:46 +0100268 }
Antonio Quartullibcef1f32015-03-01 00:50:17 +0800269
Sven Eckelmannabe59c62016-03-11 16:44:06 +0100270 batadv_hardif_neigh_put(neigh_node->hardif_neigh);
Marek Lindnercef63412015-08-04 21:09:55 +0800271
Sven Eckelmann82047ad2016-01-17 11:01:10 +0100272 batadv_hardif_put(neigh_node->if_incoming);
Simon Wunderlich89652332013-11-13 19:14:46 +0100273
Sven Eckelmannb4d922c2016-01-05 12:06:25 +0100274 kfree_rcu(neigh_node, rcu);
Simon Wunderlich89652332013-11-13 19:14:46 +0100275}
276
277/**
Sven Eckelmann25bb2502016-01-17 11:01:11 +0100278 * batadv_neigh_node_put - decrement the neighbor's refcounter and possibly
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100279 * release it
Simon Wunderlich89652332013-11-13 19:14:46 +0100280 * @neigh_node: neigh neighbor to free
281 */
Sven Eckelmann25bb2502016-01-17 11:01:11 +0100282void batadv_neigh_node_put(struct batadv_neigh_node *neigh_node)
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000283{
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100284 kref_put(&neigh_node->refcount, batadv_neigh_node_release);
Simon Wunderlicha4c135c2011-01-19 20:01:43 +0000285}
286
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100287/**
Antonio Quartulli6d030de2016-03-11 16:36:19 +0100288 * batadv_orig_router_get - get the router to the originator depending on iface
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100289 * @orig_node: the orig node for the router
290 * @if_outgoing: the interface where the payload packet has been received or
291 * the OGM should be sent to
292 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200293 * Return: the neighbor which should be router for this orig_node/iface.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100294 *
295 * The object is returned with refcounter increased by 1.
296 */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200297struct batadv_neigh_node *
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100298batadv_orig_router_get(struct batadv_orig_node *orig_node,
299 const struct batadv_hard_iface *if_outgoing)
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000300{
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100301 struct batadv_orig_ifinfo *orig_ifinfo;
302 struct batadv_neigh_node *router = NULL;
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000303
304 rcu_read_lock();
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100305 hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
306 if (orig_ifinfo->if_outgoing != if_outgoing)
307 continue;
308
309 router = rcu_dereference(orig_ifinfo->router);
310 break;
311 }
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000312
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100313 if (router && !kref_get_unless_zero(&router->refcount))
Linus Lüssinge1a5382f2011-03-14 22:43:37 +0000314 router = NULL;
315
316 rcu_read_unlock();
317 return router;
318}
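/* Editor's sketch (not part of the original file): callers of
 * batadv_orig_router_get() receive the neighbour with its refcount
 * increased and must release it with batadv_neigh_node_put().
 * BATADV_IF_DEFAULT selects the interface-independent routing entry.
 * The helper name is hypothetical.
 */
static bool __maybe_unused
batadv_example_has_default_router(struct batadv_orig_node *orig_node)
{
	struct batadv_neigh_node *router;

	router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
	if (!router)
		return false;

	/* ... e.g. forward a packet via router->addr on router->if_incoming ... */

	batadv_neigh_node_put(router);
	return true;
}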
319
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200320/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100321 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
322 * @orig_node: the orig node to be queried
323 * @if_outgoing: the interface for which the ifinfo should be acquired
324 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200325 * Return: the requested orig_ifinfo or NULL if not found.
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100326 *
327 * The object is returned with refcounter increased by 1.
328 */
329struct batadv_orig_ifinfo *
330batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
331 struct batadv_hard_iface *if_outgoing)
332{
333 struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
334
335 rcu_read_lock();
336 hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
337 list) {
338 if (tmp->if_outgoing != if_outgoing)
339 continue;
340
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100341 if (!kref_get_unless_zero(&tmp->refcount))
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100342 continue;
343
344 orig_ifinfo = tmp;
345 break;
346 }
347 rcu_read_unlock();
348
349 return orig_ifinfo;
350}
351
352/**
353 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
354 * @orig_node: the orig node to be queried
355 * @if_outgoing: the interface for which the ifinfo should be acquired
356 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200357 * Return: NULL in case of failure or the orig_ifinfo object for the if_outgoing
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100358 * interface otherwise. The object is created and added to the list
359 * if it does not exist.
360 *
361 * The object is returned with refcounter increased by 1.
362 */
363struct batadv_orig_ifinfo *
364batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
365 struct batadv_hard_iface *if_outgoing)
366{
367 struct batadv_orig_ifinfo *orig_ifinfo = NULL;
368 unsigned long reset_time;
369
370 spin_lock_bh(&orig_node->neigh_list_lock);
371
372 orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
373 if (orig_ifinfo)
374 goto out;
375
376 orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
377 if (!orig_ifinfo)
378 goto out;
379
Sven Eckelmann17a86912016-04-11 13:06:40 +0200380 if (if_outgoing != BATADV_IF_DEFAULT)
381 kref_get(&if_outgoing->refcount);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100382
383 reset_time = jiffies - 1;
384 reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
385 orig_ifinfo->batman_seqno_reset = reset_time;
386 orig_ifinfo->if_outgoing = if_outgoing;
387 INIT_HLIST_NODE(&orig_ifinfo->list);
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100388 kref_init(&orig_ifinfo->refcount);
Sven Eckelmannf257b992016-07-15 17:39:17 +0200389
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100390 kref_get(&orig_ifinfo->refcount);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100391 hlist_add_head_rcu(&orig_ifinfo->list,
392 &orig_node->ifinfo_list);
393out:
394 spin_unlock_bh(&orig_node->neigh_list_lock);
395 return orig_ifinfo;
396}
397
398/**
Simon Wunderlich89652332013-11-13 19:14:46 +0100399 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
Sven Eckelmanne51f0392015-09-06 21:38:51 +0200400 * @neigh: the neigh node to be queried
Simon Wunderlich89652332013-11-13 19:14:46 +0100401 * @if_outgoing: the interface for which the ifinfo should be acquired
402 *
403 * The object is returned with refcounter increased by 1.
404 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200405 * Return: the requested neigh_ifinfo or NULL if not found
Simon Wunderlich89652332013-11-13 19:14:46 +0100406 */
407struct batadv_neigh_ifinfo *
408batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
409 struct batadv_hard_iface *if_outgoing)
410{
411 struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
412 *tmp_neigh_ifinfo;
413
414 rcu_read_lock();
415 hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
416 list) {
417 if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
418 continue;
419
Sven Eckelmann962c6832016-01-16 10:29:51 +0100420 if (!kref_get_unless_zero(&tmp_neigh_ifinfo->refcount))
Simon Wunderlich89652332013-11-13 19:14:46 +0100421 continue;
422
423 neigh_ifinfo = tmp_neigh_ifinfo;
424 break;
425 }
426 rcu_read_unlock();
427
428 return neigh_ifinfo;
429}
430
431/**
432 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
Sven Eckelmanne51f0392015-09-06 21:38:51 +0200433 * @neigh: the neigh node to be queried
Simon Wunderlich89652332013-11-13 19:14:46 +0100434 * @if_outgoing: the interface for which the ifinfo should be acquired
435 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200436 * Return: NULL in case of failure or the neigh_ifinfo object for the
Simon Wunderlich89652332013-11-13 19:14:46 +0100437 * if_outgoing interface otherwise. The object is created and added to the list
438 * if it does not exist.
439 *
440 * The object is returned with refcounter increased by 1.
441 */
442struct batadv_neigh_ifinfo *
443batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
444 struct batadv_hard_iface *if_outgoing)
445{
446 struct batadv_neigh_ifinfo *neigh_ifinfo;
447
448 spin_lock_bh(&neigh->ifinfo_lock);
449
450 neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
451 if (neigh_ifinfo)
452 goto out;
453
454 neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
455 if (!neigh_ifinfo)
456 goto out;
457
Sven Eckelmann17a86912016-04-11 13:06:40 +0200458 if (if_outgoing)
459 kref_get(&if_outgoing->refcount);
Simon Wunderlich89652332013-11-13 19:14:46 +0100460
461 INIT_HLIST_NODE(&neigh_ifinfo->list);
Sven Eckelmann962c6832016-01-16 10:29:51 +0100462 kref_init(&neigh_ifinfo->refcount);
Simon Wunderlich89652332013-11-13 19:14:46 +0100463 neigh_ifinfo->if_outgoing = if_outgoing;
464
Sven Eckelmann2e774ac2016-07-15 17:39:19 +0200465 kref_get(&neigh_ifinfo->refcount);
Simon Wunderlich89652332013-11-13 19:14:46 +0100466 hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
467
468out:
469 spin_unlock_bh(&neigh->ifinfo_lock);
470
471 return neigh_ifinfo;
472}
473
474/**
Marek Lindnered292662015-08-04 23:31:44 +0800475 * batadv_neigh_node_get - retrieve a neighbour from the list
476 * @orig_node: originator which the neighbour belongs to
477 * @hard_iface: the interface where this neighbour is connected to
478 * @addr: the address of the neighbour
479 *
480 * Looks for and possibly returns a neighbour belonging to this originator list
481 * which is connected through the provided hard interface.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200482 *
483 * Return: neighbor when found. Otherwise NULL
Marek Lindnered292662015-08-04 23:31:44 +0800484 */
485static struct batadv_neigh_node *
486batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
487 const struct batadv_hard_iface *hard_iface,
488 const u8 *addr)
489{
490 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
491
492 rcu_read_lock();
493 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
494 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
495 continue;
496
497 if (tmp_neigh_node->if_incoming != hard_iface)
498 continue;
499
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100500 if (!kref_get_unless_zero(&tmp_neigh_node->refcount))
Marek Lindnered292662015-08-04 23:31:44 +0800501 continue;
502
503 res = tmp_neigh_node;
504 break;
505 }
506 rcu_read_unlock();
507
508 return res;
509}
510
511/**
Marek Lindnercef63412015-08-04 21:09:55 +0800512 * batadv_hardif_neigh_create - create a hardif neighbour node
513 * @hard_iface: the interface this neighbour is connected to
514 * @neigh_addr: the interface address of the neighbour to retrieve
515 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200516 * Return: the hardif neighbour node if found or created or NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800517 */
518static struct batadv_hardif_neigh_node *
519batadv_hardif_neigh_create(struct batadv_hard_iface *hard_iface,
520 const u8 *neigh_addr)
521{
Marek Lindner8248a4c2015-08-04 21:09:56 +0800522 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Marek Lindnercef63412015-08-04 21:09:55 +0800523 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
524
525 spin_lock_bh(&hard_iface->neigh_list_lock);
526
527 /* check if neighbor hasn't been added in the meantime */
528 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
529 if (hardif_neigh)
530 goto out;
531
Marek Lindnercef63412015-08-04 21:09:55 +0800532 hardif_neigh = kzalloc(sizeof(*hardif_neigh), GFP_ATOMIC);
Sven Eckelmann17a86912016-04-11 13:06:40 +0200533 if (!hardif_neigh)
Marek Lindnercef63412015-08-04 21:09:55 +0800534 goto out;
Marek Lindnercef63412015-08-04 21:09:55 +0800535
Sven Eckelmann17a86912016-04-11 13:06:40 +0200536 kref_get(&hard_iface->refcount);
Marek Lindnercef63412015-08-04 21:09:55 +0800537 INIT_HLIST_NODE(&hardif_neigh->list);
538 ether_addr_copy(hardif_neigh->addr, neigh_addr);
539 hardif_neigh->if_incoming = hard_iface;
540 hardif_neigh->last_seen = jiffies;
541
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100542 kref_init(&hardif_neigh->refcount);
Marek Lindnercef63412015-08-04 21:09:55 +0800543
Antonio Quartulli29824a52016-05-25 23:27:31 +0800544 if (bat_priv->algo_ops->neigh.hardif_init)
545 bat_priv->algo_ops->neigh.hardif_init(hardif_neigh);
Marek Lindner8248a4c2015-08-04 21:09:56 +0800546
Marek Lindnercef63412015-08-04 21:09:55 +0800547 hlist_add_head(&hardif_neigh->list, &hard_iface->neigh_list);
548
549out:
550 spin_unlock_bh(&hard_iface->neigh_list_lock);
551 return hardif_neigh;
552}
553
554/**
555 * batadv_hardif_neigh_get_or_create - retrieve or create a hardif neighbour
556 * node
557 * @hard_iface: the interface this neighbour is connected to
558 * @neigh_addr: the interface address of the neighbour to retrieve
559 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200560 * Return: the hardif neighbour node if found or created or NULL otherwise.
Marek Lindnercef63412015-08-04 21:09:55 +0800561 */
562static struct batadv_hardif_neigh_node *
563batadv_hardif_neigh_get_or_create(struct batadv_hard_iface *hard_iface,
564 const u8 *neigh_addr)
565{
566 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
567
568 /* first check without locking to avoid the overhead */
569 hardif_neigh = batadv_hardif_neigh_get(hard_iface, neigh_addr);
570 if (hardif_neigh)
571 return hardif_neigh;
572
573 return batadv_hardif_neigh_create(hard_iface, neigh_addr);
574}
575
576/**
577 * batadv_hardif_neigh_get - retrieve a hardif neighbour from the list
578 * @hard_iface: the interface where this neighbour is connected to
579 * @neigh_addr: the address of the neighbour
580 *
581 * Looks for and possibly returns a neighbour belonging to this hard interface.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200582 *
583 * Return: neighbor when found. Otherwise NULL
Marek Lindnercef63412015-08-04 21:09:55 +0800584 */
585struct batadv_hardif_neigh_node *
586batadv_hardif_neigh_get(const struct batadv_hard_iface *hard_iface,
587 const u8 *neigh_addr)
588{
589 struct batadv_hardif_neigh_node *tmp_hardif_neigh, *hardif_neigh = NULL;
590
591 rcu_read_lock();
592 hlist_for_each_entry_rcu(tmp_hardif_neigh,
593 &hard_iface->neigh_list, list) {
594 if (!batadv_compare_eth(tmp_hardif_neigh->addr, neigh_addr))
595 continue;
596
Sven Eckelmann90f564d2016-01-16 10:29:40 +0100597 if (!kref_get_unless_zero(&tmp_hardif_neigh->refcount))
Marek Lindnercef63412015-08-04 21:09:55 +0800598 continue;
599
600 hardif_neigh = tmp_hardif_neigh;
601 break;
602 }
603 rcu_read_unlock();
604
605 return hardif_neigh;
606}
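/* Editor's sketch (not part of the original file): a single-hop neighbour
 * check based on batadv_hardif_neigh_get(); the returned object carries a
 * reference that has to be dropped with batadv_hardif_neigh_put().
 * The helper name is hypothetical.
 */
static bool __maybe_unused
batadv_example_is_direct_neighbor(const struct batadv_hard_iface *hard_iface,
				  const u8 *addr)
{
	struct batadv_hardif_neigh_node *hardif_neigh;

	hardif_neigh = batadv_hardif_neigh_get(hard_iface, addr);
	if (!hardif_neigh)
		return false;

	batadv_hardif_neigh_put(hardif_neigh);
	return true;
}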
607
608/**
Marek Lindner6f0a6b52016-05-03 01:52:08 +0800609 * batadv_neigh_node_create - create a neigh node object
Marek Lindner3f32f8a2015-07-26 04:59:15 +0800610 * @orig_node: originator object representing the neighbour
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200611 * @hard_iface: the interface where the neighbour is connected to
612 * @neigh_addr: the mac address of the neighbour interface
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200613 *
614 * Allocates a new neigh_node object and initialises all the generic fields.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200615 *
Marek Lindner6f0a6b52016-05-03 01:52:08 +0800616 * Return: the neighbour node if found or created or NULL otherwise.
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200617 */
Marek Lindner6f0a6b52016-05-03 01:52:08 +0800618static struct batadv_neigh_node *
619batadv_neigh_node_create(struct batadv_orig_node *orig_node,
620 struct batadv_hard_iface *hard_iface,
621 const u8 *neigh_addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000622{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200623 struct batadv_neigh_node *neigh_node;
Marek Lindnercef63412015-08-04 21:09:55 +0800624 struct batadv_hardif_neigh_node *hardif_neigh = NULL;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000625
Linus Lüssinge1237052016-01-07 08:11:12 +0100626 spin_lock_bh(&orig_node->neigh_list_lock);
627
Marek Lindner741aa062015-07-26 04:57:43 +0800628 neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
629 if (neigh_node)
630 goto out;
631
Marek Lindnercef63412015-08-04 21:09:55 +0800632 hardif_neigh = batadv_hardif_neigh_get_or_create(hard_iface,
633 neigh_addr);
634 if (!hardif_neigh)
635 goto out;
636
Sven Eckelmann704509b2011-05-14 23:14:54 +0200637 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000638 if (!neigh_node)
Marek Lindner7ae8b282012-03-01 15:35:21 +0800639 goto out;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000640
Marek Lindner9591a792010-12-12 21:57:11 +0000641 INIT_HLIST_NODE(&neigh_node->list);
Simon Wunderlich89652332013-11-13 19:14:46 +0100642 INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
643 spin_lock_init(&neigh_node->ifinfo_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000644
Sven Eckelmann17a86912016-04-11 13:06:40 +0200645 kref_get(&hard_iface->refcount);
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100646 ether_addr_copy(neigh_node->addr, neigh_addr);
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200647 neigh_node->if_incoming = hard_iface;
648 neigh_node->orig_node = orig_node;
Marek Lindnere48474e2016-03-11 16:01:09 +0100649 neigh_node->last_seen = jiffies;
Antonio Quartulli0538f7592013-09-02 12:15:01 +0200650
Sven Eckelmannabe59c62016-03-11 16:44:06 +0100651 /* increment unique neighbor refcount */
652 kref_get(&hardif_neigh->refcount);
653 neigh_node->hardif_neigh = hardif_neigh;
654
Marek Lindner1605d0d2011-02-18 12:28:11 +0000655 /* extra reference for return */
Sven Eckelmann77ae32e2016-01-16 10:29:53 +0100656 kref_init(&neigh_node->refcount);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000657
Sven Eckelmann84274452016-07-15 17:39:20 +0200658 kref_get(&neigh_node->refcount);
Marek Lindner741aa062015-07-26 04:57:43 +0800659 hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
Marek Lindner741aa062015-07-26 04:57:43 +0800660
661 batadv_dbg(BATADV_DBG_BATMAN, orig_node->bat_priv,
662 "Creating new neighbor %pM for orig_node %pM on interface %s\n",
663 neigh_addr, orig_node->orig, hard_iface->net_dev->name);
664
Marek Lindner7ae8b282012-03-01 15:35:21 +0800665out:
Linus Lüssinge1237052016-01-07 08:11:12 +0100666 spin_unlock_bh(&orig_node->neigh_list_lock);
667
Marek Lindnercef63412015-08-04 21:09:55 +0800668 if (hardif_neigh)
Sven Eckelmannaccadc32016-01-17 11:01:14 +0100669 batadv_hardif_neigh_put(hardif_neigh);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000670 return neigh_node;
671}
672
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100673/**
Marek Lindner6f0a6b52016-05-03 01:52:08 +0800674 * batadv_neigh_node_get_or_create - retrieve or create a neigh node object
675 * @orig_node: originator object representing the neighbour
676 * @hard_iface: the interface where the neighbour is connected to
677 * @neigh_addr: the mac address of the neighbour interface
678 *
679 * Return: the neighbour node if found or created or NULL otherwise.
680 */
681struct batadv_neigh_node *
682batadv_neigh_node_get_or_create(struct batadv_orig_node *orig_node,
683 struct batadv_hard_iface *hard_iface,
684 const u8 *neigh_addr)
685{
686 struct batadv_neigh_node *neigh_node = NULL;
687
688 /* first check without locking to avoid the overhead */
689 neigh_node = batadv_neigh_node_get(orig_node, hard_iface, neigh_addr);
690 if (neigh_node)
691 return neigh_node;
692
693 return batadv_neigh_node_create(orig_node, hard_iface, neigh_addr);
694}
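/* Editor's sketch (not part of the original file): a typical caller, e.g.
 * an OGM receive path, resolves the sending interface address to a
 * neigh_node with the get-or-create helper above and drops its reference
 * once done. Names below are hypothetical.
 */
static void __maybe_unused
batadv_example_touch_neighbor(struct batadv_orig_node *orig_node,
			      struct batadv_hard_iface *recv_if,
			      const u8 *sender_addr)
{
	struct batadv_neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_get_or_create(orig_node, recv_if,
						     sender_addr);
	if (!neigh_node)
		return;

	neigh_node->last_seen = jiffies;

	batadv_neigh_node_put(neigh_node);
}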
695
696/**
Marek Lindner75874052015-08-04 21:09:57 +0800697 * batadv_hardif_neigh_seq_print_text - print the single hop neighbour list
698 * @seq: neighbour table seq_file struct
699 * @offset: not used
700 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200701 * Return: always 0
Marek Lindner75874052015-08-04 21:09:57 +0800702 */
703int batadv_hardif_neigh_seq_print_text(struct seq_file *seq, void *offset)
704{
705 struct net_device *net_dev = (struct net_device *)seq->private;
706 struct batadv_priv *bat_priv = netdev_priv(net_dev);
707 struct batadv_hard_iface *primary_if;
708
709 primary_if = batadv_seq_print_text_primary_if_get(seq);
710 if (!primary_if)
711 return 0;
712
713 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
714 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
715 primary_if->net_dev->dev_addr, net_dev->name,
Antonio Quartulli29824a52016-05-25 23:27:31 +0800716 bat_priv->algo_ops->name);
Marek Lindner75874052015-08-04 21:09:57 +0800717
Sven Eckelmann82047ad2016-01-17 11:01:10 +0100718 batadv_hardif_put(primary_if);
Marek Lindner75874052015-08-04 21:09:57 +0800719
Antonio Quartulli29824a52016-05-25 23:27:31 +0800720 if (!bat_priv->algo_ops->neigh.print) {
Marek Lindner75874052015-08-04 21:09:57 +0800721 seq_puts(seq,
722 "No printing function for this routing protocol\n");
723 return 0;
724 }
725
Antonio Quartulli29824a52016-05-25 23:27:31 +0800726 bat_priv->algo_ops->neigh.print(bat_priv, seq);
Marek Lindner75874052015-08-04 21:09:57 +0800727 return 0;
728}
729
730/**
Matthias Schiffer85cf8c82016-07-03 13:31:39 +0200731 * batadv_hardif_neigh_dump - Dump to netlink the neighbor infos for a specific
732 * outgoing interface
733 * @msg: message to dump into
734 * @cb: parameters for the dump
735 *
736 * Return: 0 or error value
737 */
738int batadv_hardif_neigh_dump(struct sk_buff *msg, struct netlink_callback *cb)
739{
740 struct net *net = sock_net(cb->skb->sk);
741 struct net_device *soft_iface;
742 struct net_device *hard_iface = NULL;
743 struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT;
744 struct batadv_priv *bat_priv;
745 struct batadv_hard_iface *primary_if = NULL;
746 int ret;
747 int ifindex, hard_ifindex;
748
749 ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
750 if (!ifindex)
751 return -EINVAL;
752
753 soft_iface = dev_get_by_index(net, ifindex);
754 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
755 ret = -ENODEV;
756 goto out;
757 }
758
759 bat_priv = netdev_priv(soft_iface);
760
761 primary_if = batadv_primary_if_get_selected(bat_priv);
762 if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
763 ret = -ENOENT;
764 goto out;
765 }
766
767 hard_ifindex = batadv_netlink_get_ifindex(cb->nlh,
768 BATADV_ATTR_HARD_IFINDEX);
769 if (hard_ifindex) {
770 hard_iface = dev_get_by_index(net, hard_ifindex);
771 if (hard_iface)
772 hardif = batadv_hardif_get_by_netdev(hard_iface);
773
774 if (!hardif) {
775 ret = -ENODEV;
776 goto out;
777 }
778
779 if (hardif->soft_iface != soft_iface) {
780 ret = -ENOENT;
781 goto out;
782 }
783 }
784
785 if (!bat_priv->algo_ops->neigh.dump) {
786 ret = -EOPNOTSUPP;
787 goto out;
788 }
789
790 bat_priv->algo_ops->neigh.dump(msg, cb, bat_priv, hardif);
791
792 ret = msg->len;
793
794 out:
795 if (hardif)
796 batadv_hardif_put(hardif);
797 if (hard_iface)
798 dev_put(hard_iface);
799 if (primary_if)
800 batadv_hardif_put(primary_if);
801 if (soft_iface)
802 dev_put(soft_iface);
803
804 return ret;
805}
806
807/**
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100808 * batadv_orig_ifinfo_release - release orig_ifinfo from lists and queue for
809 * free after rcu grace period
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100810 * @ref: kref pointer of the orig_ifinfo
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100811 */
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100812static void batadv_orig_ifinfo_release(struct kref *ref)
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100813{
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100814 struct batadv_orig_ifinfo *orig_ifinfo;
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100815 struct batadv_neigh_node *router;
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100816
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100817 orig_ifinfo = container_of(ref, struct batadv_orig_ifinfo, refcount);
818
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100819 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
Sven Eckelmann82047ad2016-01-17 11:01:10 +0100820 batadv_hardif_put(orig_ifinfo->if_outgoing);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100821
Simon Wunderlich000c8df2014-03-26 15:46:22 +0100822 /* this is the last reference to this object */
823 router = rcu_dereference_protected(orig_ifinfo->router, true);
824 if (router)
Sven Eckelmann25bb2502016-01-17 11:01:11 +0100825 batadv_neigh_node_put(router);
Sven Eckelmann2baa7532016-01-05 12:06:22 +0100826
827 kfree_rcu(orig_ifinfo, rcu);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100828}
829
830/**
Sven Eckelmann35f94772016-01-17 11:01:13 +0100831 * batadv_orig_ifinfo_put - decrement the refcounter and possibly release
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100832 * the orig_ifinfo
833 * @orig_ifinfo: the orig_ifinfo object to release
834 */
Sven Eckelmann35f94772016-01-17 11:01:13 +0100835void batadv_orig_ifinfo_put(struct batadv_orig_ifinfo *orig_ifinfo)
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100836{
Sven Eckelmanna6ba0d32016-01-16 10:29:52 +0100837 kref_put(&orig_ifinfo->refcount, batadv_orig_ifinfo_release);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100838}
839
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100840/**
841 * batadv_orig_node_free_rcu - free the orig_node
842 * @rcu: rcu pointer of the orig_node
843 */
Sven Eckelmann03fc7f82012-05-12 18:34:00 +0200844static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000845{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200846 struct batadv_orig_node *orig_node;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000847
Sven Eckelmann56303d32012-06-05 22:31:31 +0200848 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000849
Linus Lüssing60432d72014-02-15 17:47:51 +0100850 batadv_mcast_purge_orig(orig_node);
851
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +0200852 batadv_frag_purge_orig(orig_node, NULL);
853
Antonio Quartulli29824a52016-05-25 23:27:31 +0800854 if (orig_node->bat_priv->algo_ops->orig.free)
855 orig_node->bat_priv->algo_ops->orig.free(orig_node);
Antonio Quartullid0015fd2013-09-03 11:10:23 +0200856
Antonio Quartullia73105b2011-04-27 14:27:44 +0200857 kfree(orig_node->tt_buff);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000858 kfree(orig_node);
859}
860
Linus Lüssing72822222013-04-15 21:43:29 +0800861/**
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100862 * batadv_orig_node_release - release orig_node from lists and queue for
863 * free after rcu grace period
Sven Eckelmann7c124392016-01-16 10:29:56 +0100864 * @ref: kref pointer of the orig_node
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100865 */
Sven Eckelmann7c124392016-01-16 10:29:56 +0100866static void batadv_orig_node_release(struct kref *ref)
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100867{
868 struct hlist_node *node_tmp;
869 struct batadv_neigh_node *neigh_node;
Sven Eckelmann7c124392016-01-16 10:29:56 +0100870 struct batadv_orig_node *orig_node;
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100871 struct batadv_orig_ifinfo *orig_ifinfo;
Sven Eckelmann33fbb1f2016-06-30 20:10:46 +0200872 struct batadv_orig_node_vlan *vlan;
Sven Eckelmanncbef1e12016-06-30 21:41:13 +0200873 struct batadv_orig_ifinfo *last_candidate;
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100874
Sven Eckelmann7c124392016-01-16 10:29:56 +0100875 orig_node = container_of(ref, struct batadv_orig_node, refcount);
876
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100877 spin_lock_bh(&orig_node->neigh_list_lock);
878
879 /* for all neighbors towards this originator ... */
880 hlist_for_each_entry_safe(neigh_node, node_tmp,
881 &orig_node->neigh_list, list) {
882 hlist_del_rcu(&neigh_node->list);
Sven Eckelmann25bb2502016-01-17 11:01:11 +0100883 batadv_neigh_node_put(neigh_node);
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100884 }
885
886 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
887 &orig_node->ifinfo_list, list) {
888 hlist_del_rcu(&orig_ifinfo->list);
Sven Eckelmann35f94772016-01-17 11:01:13 +0100889 batadv_orig_ifinfo_put(orig_ifinfo);
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100890 }
Sven Eckelmanncbef1e12016-06-30 21:41:13 +0200891
892 last_candidate = orig_node->last_bonding_candidate;
893 orig_node->last_bonding_candidate = NULL;
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100894 spin_unlock_bh(&orig_node->neigh_list_lock);
895
Sven Eckelmanncbef1e12016-06-30 21:41:13 +0200896 if (last_candidate)
897 batadv_orig_ifinfo_put(last_candidate);
898
Sven Eckelmann33fbb1f2016-06-30 20:10:46 +0200899 spin_lock_bh(&orig_node->vlan_list_lock);
900 hlist_for_each_entry_safe(vlan, node_tmp, &orig_node->vlan_list, list) {
901 hlist_del_rcu(&vlan->list);
902 batadv_orig_node_vlan_put(vlan);
903 }
904 spin_unlock_bh(&orig_node->vlan_list_lock);
905
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100906 /* Free nc_nodes */
907 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
908
909 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
910}
911
912/**
Sven Eckelmann5d967312016-01-17 11:01:09 +0100913 * batadv_orig_node_put - decrement the orig node refcounter and possibly
Sven Eckelmanndeed9662016-01-05 12:06:21 +0100914 * release it
Linus Lüssing72822222013-04-15 21:43:29 +0800915 * @orig_node: the orig node to free
916 */
Sven Eckelmann5d967312016-01-17 11:01:09 +0100917void batadv_orig_node_put(struct batadv_orig_node *orig_node)
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000918{
Sven Eckelmann7c124392016-01-16 10:29:56 +0100919 kref_put(&orig_node->refcount, batadv_orig_node_release);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000920}
921
Sven Eckelmann56303d32012-06-05 22:31:31 +0200922void batadv_originator_free(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000923{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +0200924 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -0800925 struct hlist_node *node_tmp;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000926 struct hlist_head *head;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000927 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +0200928 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200929 u32 i;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000930
931 if (!hash)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000932 return;
933
934 cancel_delayed_work_sync(&bat_priv->orig_work);
935
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000936 bat_priv->orig_hash = NULL;
Marek Lindner16b1aba2011-01-19 20:01:42 +0000937
938 for (i = 0; i < hash->size; i++) {
939 head = &hash->table[i];
940 list_lock = &hash->list_locks[i];
941
942 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -0800943 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +0000944 head, hash_entry) {
Sasha Levinb67bfe02013-02-27 17:06:00 -0800945 hlist_del_rcu(&orig_node->hash_entry);
Sven Eckelmann5d967312016-01-17 11:01:09 +0100946 batadv_orig_node_put(orig_node);
Marek Lindner16b1aba2011-01-19 20:01:42 +0000947 }
948 spin_unlock_bh(list_lock);
949 }
950
Sven Eckelmann1a8eaf02012-05-12 02:09:32 +0200951 batadv_hash_destroy(hash);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000952}
953
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200954/**
955 * batadv_orig_node_new - creates a new orig_node
956 * @bat_priv: the bat priv with all the soft interface information
957 * @addr: the mac address of the originator
958 *
959 * Creates a new originator object and initialises all the generic fields.
960 * The new object is not added to the originator list.
Sven Eckelmann62fe7102015-09-15 19:00:48 +0200961 *
962 * Return: the newly created object or NULL on failure.
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +0200963 */
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200964struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
Sven Eckelmann6b5e9712015-05-26 18:34:26 +0200965 const u8 *addr)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000966{
Sven Eckelmann56303d32012-06-05 22:31:31 +0200967 struct batadv_orig_node *orig_node;
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200968 struct batadv_orig_node_vlan *vlan;
Sven Eckelmann42d0b042012-06-03 22:19:17 +0200969 unsigned long reset_time;
Antonio Quartullibbad0a52013-09-02 12:15:02 +0200970 int i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000971
Sven Eckelmann39c75a52012-06-03 22:19:22 +0200972 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
973 "Creating new originator: %pM\n", addr);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000974
Sven Eckelmann704509b2011-05-14 23:14:54 +0200975 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000976 if (!orig_node)
977 return NULL;
978
Marek Lindner9591a792010-12-12 21:57:11 +0000979 INIT_HLIST_HEAD(&orig_node->neigh_list);
Marek Lindnerd0fa4f32015-06-22 00:30:22 +0800980 INIT_HLIST_HEAD(&orig_node->vlan_list);
Simon Wunderlich7351a4822013-11-13 19:14:47 +0100981 INIT_HLIST_HEAD(&orig_node->ifinfo_list);
Marek Lindnerf3e00082011-01-25 21:52:11 +0000982 spin_lock_init(&orig_node->bcast_seqno_lock);
Marek Lindnerf987ed62010-12-12 21:57:12 +0000983 spin_lock_init(&orig_node->neigh_list_lock);
Antonio Quartullia73105b2011-04-27 14:27:44 +0200984 spin_lock_init(&orig_node->tt_buff_lock);
Antonio Quartullia70a9aa2013-07-30 22:16:24 +0200985 spin_lock_init(&orig_node->tt_lock);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +0200986 spin_lock_init(&orig_node->vlan_list_lock);
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000987
Martin Hundebølld56b1702013-01-25 11:12:39 +0100988 batadv_nc_init_orig(orig_node);
989
Marek Lindner7b36e8e2011-02-18 12:28:10 +0000990 /* extra reference for return */
Sven Eckelmann7c124392016-01-16 10:29:56 +0100991 kref_init(&orig_node->refcount);
992 kref_get(&orig_node->refcount);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +0000993
Marek Lindner16b1aba2011-01-19 20:01:42 +0000994 orig_node->bat_priv = bat_priv;
Antonio Quartulli8fdd0152014-01-22 00:42:11 +0100995 ether_addr_copy(orig_node->orig, addr);
Antonio Quartulli785ea112011-11-23 11:35:44 +0100996 batadv_dat_init_orig_node_addr(orig_node);
Antonio Quartullic8c991b2011-07-07 01:40:57 +0200997 atomic_set(&orig_node->last_ttvn, 0);
Antonio Quartulli2dafb492011-05-05 08:42:45 +0200998 orig_node->tt_buff = NULL;
Antonio Quartullia73105b2011-04-27 14:27:44 +0200999 orig_node->tt_buff_len = 0;
Linus Lüssing2c667a32014-10-30 06:23:40 +01001000 orig_node->last_seen = jiffies;
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001001 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
1002 orig_node->bcast_seqno_reset = reset_time;
Linus Lüssing8a4023c2015-06-16 17:10:26 +02001003
Linus Lüssing60432d72014-02-15 17:47:51 +01001004#ifdef CONFIG_BATMAN_ADV_MCAST
1005 orig_node->mcast_flags = BATADV_NO_FLAGS;
Linus Lüssing8a4023c2015-06-16 17:10:26 +02001006 INIT_HLIST_NODE(&orig_node->mcast_want_all_unsnoopables_node);
1007 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv4_node);
1008 INIT_HLIST_NODE(&orig_node->mcast_want_all_ipv6_node);
1009 spin_lock_init(&orig_node->mcast_handler_lock);
Linus Lüssing60432d72014-02-15 17:47:51 +01001010#endif
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001011
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +02001012 /* create a vlan object for the "untagged" LAN */
1013 vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
1014 if (!vlan)
1015 goto free_orig_node;
1016 /* batadv_orig_node_vlan_new() increases the refcounter.
1017 * Immediately release vlan since it is not needed anymore in this
1018 * context
1019 */
Sven Eckelmann21754e22016-01-17 11:01:24 +01001020 batadv_orig_node_vlan_put(vlan);
Antonio Quartulli7ea7b4a2013-07-30 22:16:25 +02001021
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +02001022 for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
1023 INIT_HLIST_HEAD(&orig_node->fragments[i].head);
1024 spin_lock_init(&orig_node->fragments[i].lock);
1025 orig_node->fragments[i].size = 0;
1026 }
1027
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001028 return orig_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001029free_orig_node:
1030 kfree(orig_node);
1031 return NULL;
1032}
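/* Editor's sketch (not part of the original file): batadv_orig_node_new()
 * only allocates and initialises the object; the caller (normally the
 * routing algorithm) inserts it into bat_priv->orig_hash itself and has to
 * balance the extra reference with batadv_orig_node_put() once the pointer
 * is no longer needed. The helper name is hypothetical.
 */
static void __maybe_unused
batadv_example_orig_node_lifecycle(struct batadv_priv *bat_priv,
				   const u8 *addr)
{
	struct batadv_orig_node *orig_node;

	orig_node = batadv_orig_node_new(bat_priv, addr);
	if (!orig_node)
		return;

	/* ... hand the node over to the routing algorithm / orig hash ... */

	batadv_orig_node_put(orig_node);
}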
1033
Simon Wunderlich89652332013-11-13 19:14:46 +01001034/**
Simon Wunderlich709de132014-03-26 15:46:24 +01001035 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
1036 * @bat_priv: the bat priv with all the soft interface information
1037 * @neigh: neighbor node which is to be checked
1038 */
1039static void
1040batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
1041 struct batadv_neigh_node *neigh)
1042{
1043 struct batadv_neigh_ifinfo *neigh_ifinfo;
1044 struct batadv_hard_iface *if_outgoing;
1045 struct hlist_node *node_tmp;
1046
1047 spin_lock_bh(&neigh->ifinfo_lock);
1048
1049 /* for all ifinfo objects for this neighbor */
1050 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
1051 &neigh->ifinfo_list, list) {
1052 if_outgoing = neigh_ifinfo->if_outgoing;
1053
1054 /* always keep the default interface */
1055 if (if_outgoing == BATADV_IF_DEFAULT)
1056 continue;
1057
1058 /* don't purge if the interface is not (going) down */
1059 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
1060 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
1061 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
1062 continue;
1063
1064 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1065 "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
1066 neigh->addr, if_outgoing->net_dev->name);
1067
1068 hlist_del_rcu(&neigh_ifinfo->list);
Sven Eckelmann044fa3a2016-01-17 11:01:12 +01001069 batadv_neigh_ifinfo_put(neigh_ifinfo);
Simon Wunderlich709de132014-03-26 15:46:24 +01001070 }
1071
1072 spin_unlock_bh(&neigh->ifinfo_lock);
1073}
1074
1075/**
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001076 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
1077 * @bat_priv: the bat priv with all the soft interface information
1078 * @orig_node: orig node which is to be checked
1079 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001080 * Return: true if any ifinfo entry was purged, false otherwise.
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001081 */
1082static bool
1083batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
1084 struct batadv_orig_node *orig_node)
1085{
1086 struct batadv_orig_ifinfo *orig_ifinfo;
1087 struct batadv_hard_iface *if_outgoing;
1088 struct hlist_node *node_tmp;
1089 bool ifinfo_purged = false;
1090
1091 spin_lock_bh(&orig_node->neigh_list_lock);
1092
1093 /* for all ifinfo objects for this originator */
1094 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
1095 &orig_node->ifinfo_list, list) {
1096 if_outgoing = orig_ifinfo->if_outgoing;
1097
1098 /* always keep the default interface */
1099 if (if_outgoing == BATADV_IF_DEFAULT)
1100 continue;
1101
1102 /* don't purge if the interface is not (going) down */
1103 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
1104 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
1105 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
1106 continue;
1107
1108 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
1109 "router/ifinfo purge: originator %pM, iface: %s\n",
1110 orig_node->orig, if_outgoing->net_dev->name);
1111
1112 ifinfo_purged = true;
1113
1114 hlist_del_rcu(&orig_ifinfo->list);
Sven Eckelmann35f94772016-01-17 11:01:13 +01001115 batadv_orig_ifinfo_put(orig_ifinfo);
Simon Wunderlichf3b3d902013-11-13 19:14:50 +01001116 if (orig_node->last_bonding_candidate == orig_ifinfo) {
1117 orig_node->last_bonding_candidate = NULL;
Sven Eckelmann35f94772016-01-17 11:01:13 +01001118 batadv_orig_ifinfo_put(orig_ifinfo);
Simon Wunderlichf3b3d902013-11-13 19:14:50 +01001119 }
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001120 }
1121
1122 spin_unlock_bh(&orig_node->neigh_list_lock);
1123
1124 return ifinfo_purged;
1125}
1126
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001127/**
Simon Wunderlich89652332013-11-13 19:14:46 +01001128 * batadv_purge_orig_neighbors - purges neighbors from originator
1129 * @bat_priv: the bat priv with all the soft interface information
1130 * @orig_node: orig node which is to be checked
1131 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001132 * Return: true if any neighbor was purged, false otherwise
Simon Wunderlich89652332013-11-13 19:14:46 +01001133 */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001134static bool
1135batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
Simon Wunderlich89652332013-11-13 19:14:46 +01001136 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001137{
Sasha Levinb67bfe02013-02-27 17:06:00 -08001138 struct hlist_node *node_tmp;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001139 struct batadv_neigh_node *neigh_node;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001140 bool neigh_purged = false;
Marek Lindner0b0094e2012-03-01 15:35:20 +08001141 unsigned long last_seen;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001142 struct batadv_hard_iface *if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001143
Marek Lindnerf987ed62010-12-12 21:57:12 +00001144 spin_lock_bh(&orig_node->neigh_list_lock);
1145
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001146 /* for all neighbors towards this originator ... */
Sasha Levinb67bfe02013-02-27 17:06:00 -08001147 hlist_for_each_entry_safe(neigh_node, node_tmp,
Marek Lindner9591a792010-12-12 21:57:11 +00001148 &orig_node->neigh_list, list) {
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001149 last_seen = neigh_node->last_seen;
1150 if_incoming = neigh_node->if_incoming;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001151
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001152 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001153 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
1154 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1155 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001156 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
1157 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
1158 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001159 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001160 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
1161 orig_node->orig, neigh_node->addr,
1162 if_incoming->net_dev->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001163 else
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001164 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001165 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
1166 orig_node->orig, neigh_node->addr,
1167 jiffies_to_msecs(last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001168
1169 neigh_purged = true;
Marek Lindner9591a792010-12-12 21:57:11 +00001170
Marek Lindnerf987ed62010-12-12 21:57:12 +00001171 hlist_del_rcu(&neigh_node->list);
Sven Eckelmann25bb2502016-01-17 11:01:11 +01001172 batadv_neigh_node_put(neigh_node);
Simon Wunderlich709de132014-03-26 15:46:24 +01001173 } else {
1174 /* only necessary if not the whole neighbor is to be
1175 * deleted, but some interface has been removed.
1176 */
1177 batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001178 }
1179 }
Marek Lindnerf987ed62010-12-12 21:57:12 +00001180
1181 spin_unlock_bh(&orig_node->neigh_list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001182 return neigh_purged;
1183}
1184
Simon Wunderlich89652332013-11-13 19:14:46 +01001185/**
1186 * batadv_find_best_neighbor - finds the best neighbor after purging
1187 * @bat_priv: the bat priv with all the soft interface information
1188 * @orig_node: orig node which is to be checked
1189 * @if_outgoing: the interface for which the metric should be compared
1190 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001191 * Return: the current best neighbor, with refcount increased.
Simon Wunderlich89652332013-11-13 19:14:46 +01001192 */
1193static struct batadv_neigh_node *
1194batadv_find_best_neighbor(struct batadv_priv *bat_priv,
1195 struct batadv_orig_node *orig_node,
1196 struct batadv_hard_iface *if_outgoing)
1197{
1198 struct batadv_neigh_node *best = NULL, *neigh;
Antonio Quartulli29824a52016-05-25 23:27:31 +08001199 struct batadv_algo_ops *bao = bat_priv->algo_ops;
Simon Wunderlich89652332013-11-13 19:14:46 +01001200
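	/* walk the neighbor list under RCU: keep the neighbor that the
	 * routing algorithm ranks best for if_outgoing, taking a reference
	 * to it and dropping the reference to the previous candidate
	 */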
1201 rcu_read_lock();
1202 hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
Antonio Quartulli29824a52016-05-25 23:27:31 +08001203 if (best && (bao->neigh.cmp(neigh, if_outgoing, best,
1204 if_outgoing) <= 0))
Simon Wunderlich89652332013-11-13 19:14:46 +01001205 continue;
1206
Sven Eckelmann77ae32e2016-01-16 10:29:53 +01001207 if (!kref_get_unless_zero(&neigh->refcount))
Simon Wunderlich89652332013-11-13 19:14:46 +01001208 continue;
1209
1210 if (best)
Sven Eckelmann25bb2502016-01-17 11:01:11 +01001211 batadv_neigh_node_put(best);
Simon Wunderlich89652332013-11-13 19:14:46 +01001212
1213 best = neigh;
1214 }
1215 rcu_read_unlock();
1216
1217 return best;
1218}
1219
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001220/**
1221 * batadv_purge_orig_node - purges obsolete information from an orig_node
1222 * @bat_priv: the bat priv with all the soft interface information
1223 * @orig_node: orig node which is to be checked
1224 *
1225 * This function checks if the orig_node or substructures of it have become
1226 * This function checks whether the orig_node or any of its substructures
1227 * have become obsolete and purges that information if so.
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001228 * Return: true if the orig_node is to be removed, false otherwise.
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001229 */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001230static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
1231 struct batadv_orig_node *orig_node)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001232{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001233 struct batadv_neigh_node *best_neigh_node;
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001234 struct batadv_hard_iface *hard_iface;
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001235 bool changed_ifinfo, changed_neigh;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001236
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001237 if (batadv_has_timed_out(orig_node->last_seen,
1238 2 * BATADV_PURGE_TIMEOUT)) {
Sven Eckelmann39c75a52012-06-03 22:19:22 +02001239 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
Sven Eckelmann1eda58b2012-05-12 13:48:58 +02001240 "Originator timeout: originator %pM, last_seen %u\n",
1241 orig_node->orig,
1242 jiffies_to_msecs(orig_node->last_seen));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001243 return true;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001244 }
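	/* the originator itself is still alive: drop stale interface and
	 * neighbor information and, if anything changed, re-select the best
	 * neighbor towards it
	 */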
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001245 changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
1246 changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001247
Simon Wunderlich7b955a92014-03-26 15:46:23 +01001248 if (!changed_ifinfo && !changed_neigh)
Simon Wunderlich89652332013-11-13 19:14:46 +01001249 return false;
1250
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001251	/* first for the default interface (BATADV_IF_DEFAULT, i.e. NULL) ... */
Simon Wunderlich89652332013-11-13 19:14:46 +01001252 best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
1253 BATADV_IF_DEFAULT);
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001254 batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
1255 best_neigh_node);
Simon Wunderlich89652332013-11-13 19:14:46 +01001256 if (best_neigh_node)
Sven Eckelmann25bb2502016-01-17 11:01:11 +01001257 batadv_neigh_node_put(best_neigh_node);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001258
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001259 /* ... then for all other interfaces. */
1260 rcu_read_lock();
1261 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
1262 if (hard_iface->if_status != BATADV_IF_ACTIVE)
1263 continue;
1264
1265 if (hard_iface->soft_iface != bat_priv->soft_iface)
1266 continue;
1267
Sven Eckelmann27353442016-03-05 16:09:16 +01001268 if (!kref_get_unless_zero(&hard_iface->refcount))
1269 continue;
1270
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001271 best_neigh_node = batadv_find_best_neighbor(bat_priv,
1272 orig_node,
1273 hard_iface);
1274 batadv_update_route(bat_priv, orig_node, hard_iface,
1275 best_neigh_node);
1276 if (best_neigh_node)
Sven Eckelmann25bb2502016-01-17 11:01:11 +01001277 batadv_neigh_node_put(best_neigh_node);
Sven Eckelmann27353442016-03-05 16:09:16 +01001278
1279 batadv_hardif_put(hard_iface);
Simon Wunderlich7351a4822013-11-13 19:14:47 +01001280 }
1281 rcu_read_unlock();
1282
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001283 return false;
1284}
1285
Sven Eckelmann56303d32012-06-05 22:31:31 +02001286static void _batadv_purge_orig(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001287{
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001288 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001289 struct hlist_node *node_tmp;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001290 struct hlist_head *head;
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001291 spinlock_t *list_lock; /* spinlock to protect write access */
Sven Eckelmann56303d32012-06-05 22:31:31 +02001292 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001293 u32 i;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001294
1295 if (!hash)
1296 return;
1297
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001298 /* for all origins... */
1299 for (i = 0; i < hash->size; i++) {
1300 head = &hash->table[i];
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001301 list_lock = &hash->list_locks[i];
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001302
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001303 spin_lock_bh(list_lock);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001304 hlist_for_each_entry_safe(orig_node, node_tmp,
Marek Lindner7aadf882011-02-18 12:28:09 +00001305 head, hash_entry) {
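			/* an originator that is to be removed is unlinked from
			 * the hash and dropped from the gateway list and the
			 * global translation table before its reference is put
			 */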
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001306 if (batadv_purge_orig_node(bat_priv, orig_node)) {
Marek Lindner414254e2013-04-23 21:39:58 +08001307 batadv_gw_node_delete(bat_priv, orig_node);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001308 hlist_del_rcu(&orig_node->hash_entry);
Linus Lüssing9d31b3c2014-12-13 23:32:15 +01001309 batadv_tt_global_del_orig(orig_node->bat_priv,
1310 orig_node, -1,
1311 "originator timed out");
Sven Eckelmann5d967312016-01-17 11:01:09 +01001312 batadv_orig_node_put(orig_node);
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001313 continue;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001314 }
Martin Hundebøll610bfc6bc2013-05-23 16:53:02 +02001315
1316 batadv_frag_purge_orig(orig_node,
1317 batadv_frag_check_entry);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001318 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001319 spin_unlock_bh(list_lock);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001320 }
1321
Sven Eckelmann7cf06bc2012-05-12 02:09:29 +02001322 batadv_gw_election(bat_priv);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001323}
1324
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001325static void batadv_purge_orig(struct work_struct *work)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001326{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001327 struct delayed_work *delayed_work;
1328 struct batadv_priv *bat_priv;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001329
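	/* periodic purge worker: clean up the originator table and re-queue
	 * itself after BATADV_ORIG_WORK_PERIOD
	 */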
Geliang Tang4ba4bc02015-12-28 23:43:37 +08001330 delayed_work = to_delayed_work(work);
Sven Eckelmann56303d32012-06-05 22:31:31 +02001331 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001332 _batadv_purge_orig(bat_priv);
Antonio Quartulli72414442012-12-25 13:14:37 +01001333 queue_delayed_work(batadv_event_workqueue,
1334 &bat_priv->orig_work,
1335 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001336}
1337
Sven Eckelmann56303d32012-06-05 22:31:31 +02001338void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001339{
Sven Eckelmann03fc7f82012-05-12 18:34:00 +02001340 _batadv_purge_orig(bat_priv);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001341}
1342
Sven Eckelmann7d211ef2012-05-12 02:09:34 +02001343int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001344{
1345 struct net_device *net_dev = (struct net_device *)seq->private;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001346 struct batadv_priv *bat_priv = netdev_priv(net_dev);
Sven Eckelmann56303d32012-06-05 22:31:31 +02001347 struct batadv_hard_iface *primary_if;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001348
Marek Lindner30da63a2012-08-03 17:15:46 +02001349 primary_if = batadv_seq_print_text_primary_if_get(seq);
1350 if (!primary_if)
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001351 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001352
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001353 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
Sven Eckelmann42d0b042012-06-03 22:19:17 +02001354 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001355 primary_if->net_dev->dev_addr, net_dev->name,
Antonio Quartulli29824a52016-05-25 23:27:31 +08001356 bat_priv->algo_ops->name);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001357
Sven Eckelmann82047ad2016-01-17 11:01:10 +01001358 batadv_hardif_put(primary_if);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001359
Antonio Quartulli29824a52016-05-25 23:27:31 +08001360 if (!bat_priv->algo_ops->orig.print) {
Antonio Quartulli737a2a222013-09-02 12:15:03 +02001361 seq_puts(seq,
1362 "No printing function for this routing protocol\n");
1363 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001364 }
1365
Antonio Quartulli29824a52016-05-25 23:27:31 +08001366 bat_priv->algo_ops->orig.print(bat_priv, seq, BATADV_IF_DEFAULT);
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001367
Marek Lindner30da63a2012-08-03 17:15:46 +02001368 return 0;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001369}
1370
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001371/**
1372 * batadv_orig_hardif_seq_print_text - writes originator info for a specific
1373 * outgoing interface
1374 * @seq: debugfs table seq_file struct
1375 * @offset: not used
1376 *
Sven Eckelmann62fe7102015-09-15 19:00:48 +02001377 * Return: 0
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001378 */
1379int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1380{
1381 struct net_device *net_dev = (struct net_device *)seq->private;
1382 struct batadv_hard_iface *hard_iface;
1383 struct batadv_priv *bat_priv;
1384
1385 hard_iface = batadv_hardif_get_by_netdev(net_dev);
1386
1387 if (!hard_iface || !hard_iface->soft_iface) {
1388 seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
1389 goto out;
1390 }
1391
1392 bat_priv = netdev_priv(hard_iface->soft_iface);
Antonio Quartulli29824a52016-05-25 23:27:31 +08001393 if (!bat_priv->algo_ops->orig.print) {
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001394 seq_puts(seq,
1395 "No printing function for this routing protocol\n");
1396 goto out;
1397 }
1398
1399 if (hard_iface->if_status != BATADV_IF_ACTIVE) {
1400 seq_puts(seq, "Interface not active\n");
1401 goto out;
1402 }
1403
1404 seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
1405 BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
1406 hard_iface->net_dev->dev_addr,
Antonio Quartulli29824a52016-05-25 23:27:31 +08001407 hard_iface->soft_iface->name, bat_priv->algo_ops->name);
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001408
Antonio Quartulli29824a52016-05-25 23:27:31 +08001409 bat_priv->algo_ops->orig.print(bat_priv, seq, hard_iface);
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001410
1411out:
Marek Lindner16a41422014-04-24 03:44:25 +08001412 if (hard_iface)
Sven Eckelmann82047ad2016-01-17 11:01:10 +01001413 batadv_hardif_put(hard_iface);
Simon Wunderlichcb1c92e2013-11-21 11:52:16 +01001414 return 0;
1415}
1416
Matthias Schiffer85cf8c82016-07-03 13:31:39 +02001417/**
1418 * batadv_orig_dump - Dump to netlink the originator info for a specific
1419 * outgoing interface
1420 * @msg: message to dump into
1421 * @cb: parameters for the dump
1422 *
1423 * Return: 0 or error value
1424 */
1425int batadv_orig_dump(struct sk_buff *msg, struct netlink_callback *cb)
1426{
1427 struct net *net = sock_net(cb->skb->sk);
1428 struct net_device *soft_iface;
1429 struct net_device *hard_iface = NULL;
1430 struct batadv_hard_iface *hardif = BATADV_IF_DEFAULT;
1431 struct batadv_priv *bat_priv;
1432 struct batadv_hard_iface *primary_if = NULL;
1433 int ret;
1434 int ifindex, hard_ifindex;
1435
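	/* the dump is always bound to a soft interface; an optional
	 * BATADV_ATTR_HARD_IFINDEX attribute restricts it to originators
	 * seen via that outgoing hard interface
	 */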
1436 ifindex = batadv_netlink_get_ifindex(cb->nlh, BATADV_ATTR_MESH_IFINDEX);
1437 if (!ifindex)
1438 return -EINVAL;
1439
1440 soft_iface = dev_get_by_index(net, ifindex);
1441 if (!soft_iface || !batadv_softif_is_valid(soft_iface)) {
1442 ret = -ENODEV;
1443 goto out;
1444 }
1445
1446 bat_priv = netdev_priv(soft_iface);
1447
1448 primary_if = batadv_primary_if_get_selected(bat_priv);
1449 if (!primary_if || primary_if->if_status != BATADV_IF_ACTIVE) {
1450 ret = -ENOENT;
1451 goto out;
1452 }
1453
1454 hard_ifindex = batadv_netlink_get_ifindex(cb->nlh,
1455 BATADV_ATTR_HARD_IFINDEX);
1456 if (hard_ifindex) {
1457 hard_iface = dev_get_by_index(net, hard_ifindex);
1458 if (hard_iface)
1459 hardif = batadv_hardif_get_by_netdev(hard_iface);
1460
1461 if (!hardif) {
1462 ret = -ENODEV;
1463 goto out;
1464 }
1465
1466 if (hardif->soft_iface != soft_iface) {
1467 ret = -ENOENT;
1468 goto out;
1469 }
1470 }
1471
1472 if (!bat_priv->algo_ops->orig.dump) {
1473 ret = -EOPNOTSUPP;
1474 goto out;
1475 }
1476
1477 bat_priv->algo_ops->orig.dump(msg, cb, bat_priv, hardif);
1478
1479 ret = msg->len;
1480
1481 out:
1482 if (hardif)
1483 batadv_hardif_put(hardif);
1484 if (hard_iface)
1485 dev_put(hard_iface);
1486 if (primary_if)
1487 batadv_hardif_put(primary_if);
1488 if (soft_iface)
1489 dev_put(soft_iface);
1490
1491 return ret;
1492}
1493
Sven Eckelmann56303d32012-06-05 22:31:31 +02001494int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
1495 int max_if_num)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001496{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001497 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Antonio Quartulli29824a52016-05-25 23:27:31 +08001498 struct batadv_algo_ops *bao = bat_priv->algo_ops;
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001499 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001500 struct hlist_head *head;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001501 struct batadv_orig_node *orig_node;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001502 u32 i;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001503 int ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001504
1505 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001506 * if_num
1507 */
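	/* the actual resize is delegated to the routing algorithm via
	 * orig.add_if, which owns the per-interface data of each orig_node
	 */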
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001508 for (i = 0; i < hash->size; i++) {
1509 head = &hash->table[i];
1510
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001511 rcu_read_lock();
Sasha Levinb67bfe02013-02-27 17:06:00 -08001512 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001513 ret = 0;
Antonio Quartulli29824a52016-05-25 23:27:31 +08001514 if (bao->orig.add_if)
1515 ret = bao->orig.add_if(orig_node, max_if_num);
Sven Eckelmann5346c352012-05-05 13:27:28 +02001516 if (ret == -ENOMEM)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001517 goto err;
1518 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001519 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001520 }
1521
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001522 return 0;
1523
1524err:
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001525 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001526 return -ENOMEM;
1527}
1528
Sven Eckelmann56303d32012-06-05 22:31:31 +02001529int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1530 int max_if_num)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001531{
Sven Eckelmann56303d32012-06-05 22:31:31 +02001532 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
Sven Eckelmann5bf74e92012-06-05 22:31:28 +02001533 struct batadv_hashtable *hash = bat_priv->orig_hash;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001534 struct hlist_head *head;
Sven Eckelmann56303d32012-06-05 22:31:31 +02001535 struct batadv_hard_iface *hard_iface_tmp;
1536 struct batadv_orig_node *orig_node;
Antonio Quartulli29824a52016-05-25 23:27:31 +08001537 struct batadv_algo_ops *bao = bat_priv->algo_ops;
Sven Eckelmann6b5e9712015-05-26 18:34:26 +02001538 u32 i;
Antonio Quartullic90681b2011-10-05 17:05:25 +02001539 int ret;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001540
1541 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
Sven Eckelmann9cfc7bd2012-05-12 02:09:43 +02001542 * if_num
1543 */
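	/* the resize is delegated to the routing algorithm via orig.del_if;
	 * afterwards the remaining interfaces are renumbered
	 */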
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001544 for (i = 0; i < hash->size; i++) {
1545 head = &hash->table[i];
1546
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001547 rcu_read_lock();
Sasha Levinb67bfe02013-02-27 17:06:00 -08001548 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
Antonio Quartullid0015fd2013-09-03 11:10:23 +02001549 ret = 0;
Antonio Quartulli29824a52016-05-25 23:27:31 +08001550 if (bao->orig.del_if)
1551 ret = bao->orig.del_if(orig_node, max_if_num,
1552 hard_iface->if_num);
Sven Eckelmann5346c352012-05-05 13:27:28 +02001553 if (ret == -ENOMEM)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001554 goto err;
1555 }
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001556 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001557 }
1558
1559	/* renumber the remaining batman interfaces under the RCU read lock */
1560 rcu_read_lock();
Sven Eckelmann3193e8f2012-05-12 02:09:42 +02001561 list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
Sven Eckelmanne9a4f292012-06-03 22:19:19 +02001562 if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001563 continue;
1564
Marek Lindnere6c10f42011-02-18 12:33:20 +00001565 if (hard_iface == hard_iface_tmp)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001566 continue;
1567
Marek Lindnere6c10f42011-02-18 12:33:20 +00001568 if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001569 continue;
1570
Marek Lindnere6c10f42011-02-18 12:33:20 +00001571 if (hard_iface_tmp->if_num > hard_iface->if_num)
1572 hard_iface_tmp->if_num--;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001573 }
1574 rcu_read_unlock();
1575
Marek Lindnere6c10f42011-02-18 12:33:20 +00001576 hard_iface->if_num = -1;
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001577 return 0;
1578
1579err:
Marek Lindnerfb778ea2011-01-19 20:01:40 +00001580 rcu_read_unlock();
Sven Eckelmannc6c8fea2010-12-13 11:19:28 +00001581 return -ENOMEM;
1582}