/*
 * Copyright (C) 2009-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "originator.h"
#include "hash.h"
#include "translation-table.h"
#include "routing.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "unicast.h"
#include "soft-interface.h"

static void purge_orig(struct work_struct *work);

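/* schedule the next run of the originator purge work (one second ahead) */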
static void start_purge_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig);
	queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ);
}

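/* set up the originator hash table and start the periodic purge timer */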
int originator_init(struct bat_priv *bat_priv)
{
	if (bat_priv->orig_hash)
		return 1;

	spin_lock_bh(&bat_priv->orig_hash_lock);
	bat_priv->orig_hash = hash_new(1024);

	if (!bat_priv->orig_hash)
		goto err;

	spin_unlock_bh(&bat_priv->orig_hash_lock);
	start_purge_timer(bat_priv);
	return 1;

err:
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;
}

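/* free a neighbor node once its last reference has been dropped */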
void neigh_node_free_ref(struct kref *refcount)
{
	struct neigh_node *neigh_node;

	neigh_node = container_of(refcount, struct neigh_node, refcount);
	kfree(neigh_node);
}

static void neigh_node_free_rcu(struct rcu_head *rcu)
{
	struct neigh_node *neigh_node;

	neigh_node = container_of(rcu, struct neigh_node, rcu);
	kref_put(&neigh_node->refcount, neigh_node_free_ref);
}

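/* allocate a new neighbor node and add it to the originator's neighbor list */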
struct neigh_node *create_neighbor(struct orig_node *orig_node,
				   struct orig_node *orig_neigh_node,
				   uint8_t *neigh,
				   struct batman_if *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new last-hop neighbor of originator\n");

	neigh_node = kzalloc(sizeof(struct neigh_node), GFP_ATOMIC);
	if (!neigh_node)
		return NULL;

	INIT_HLIST_NODE(&neigh_node->list);

	memcpy(neigh_node->addr, neigh, ETH_ALEN);
	neigh_node->orig_node = orig_neigh_node;
	neigh_node->if_incoming = if_incoming;
	kref_init(&neigh_node->refcount);

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_node;
}

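/* free an originator and everything attached to it once its refcount
 * drops to zero */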
void orig_node_free_ref(struct kref *refcount)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node;
	struct orig_node *orig_node;

	orig_node = container_of(refcount, struct orig_node, refcount);

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {
		hlist_del_rcu(&neigh_node->list);
		call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);

	frag_list_free(&orig_node->frag_list);
	hna_global_del_orig(orig_node->bat_priv, orig_node,
			    "originator timed out");

	kfree(orig_node->bcast_own);
	kfree(orig_node->bcast_own_sum);
	kfree(orig_node);
}

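/* tear down the originator hash: stop the purge worker and release every
 * remaining originator */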
void originator_free(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct element_t *bucket;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	cancel_delayed_work_sync(&bat_priv->orig_work);

	spin_lock_bh(&bat_priv->orig_hash_lock);
	bat_priv->orig_hash = NULL;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
			orig_node = bucket->data;

			hlist_del_rcu(walk);
			call_rcu(&bucket->rcu, bucket_free_rcu);
			kref_put(&orig_node->refcount, orig_node_free_ref);
		}
		spin_unlock_bh(list_lock);
	}

	hash_destroy(hash);
	spin_unlock_bh(&bat_priv->orig_hash_lock);
}

static void bucket_free_orig_rcu(struct rcu_head *rcu)
{
	struct element_t *bucket;
	struct orig_node *orig_node;

	bucket = container_of(rcu, struct element_t, rcu);
	orig_node = bucket->data;

	kref_put(&orig_node->refcount, orig_node_free_ref);
	kfree(bucket);
}

/* this function finds or creates an originator entry for the given
 * address if it does not exist */
struct orig_node *get_orig_node(struct bat_priv *bat_priv, uint8_t *addr)
{
	struct orig_node *orig_node;
	int size;
	int hash_added;

	rcu_read_lock();
	orig_node = ((struct orig_node *)hash_find(bat_priv->orig_hash,
						   compare_orig, choose_orig,
						   addr));
	rcu_read_unlock();

	if (orig_node) {
		kref_get(&orig_node->refcount);
		return orig_node;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Creating new originator: %pM\n", addr);

	orig_node = kzalloc(sizeof(struct orig_node), GFP_ATOMIC);
	if (!orig_node)
		return NULL;

	INIT_HLIST_HEAD(&orig_node->neigh_list);
	spin_lock_init(&orig_node->ogm_cnt_lock);
	spin_lock_init(&orig_node->neigh_list_lock);
	kref_init(&orig_node->refcount);

	orig_node->bat_priv = bat_priv;
	memcpy(orig_node->orig, addr, ETH_ALEN);
	orig_node->router = NULL;
	orig_node->hna_buff = NULL;
	orig_node->bcast_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);
	orig_node->batman_seqno_reset = jiffies - 1
		- msecs_to_jiffies(RESET_PROTECTION_MS);

	size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS;

	orig_node->bcast_own = kzalloc(size, GFP_ATOMIC);
	if (!orig_node->bcast_own)
		goto free_orig_node;

	size = bat_priv->num_ifaces * sizeof(uint8_t);
	orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC);

	INIT_LIST_HEAD(&orig_node->frag_list);
	orig_node->last_frag_packet = 0;

	if (!orig_node->bcast_own_sum)
		goto free_bcast_own;

	hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig,
			      orig_node);
	if (hash_added < 0)
		goto free_bcast_own_sum;

	/* extra reference for return */
	kref_get(&orig_node->refcount);
	return orig_node;
free_bcast_own_sum:
	kfree(orig_node->bcast_own_sum);
free_bcast_own:
	kfree(orig_node->bcast_own);
free_orig_node:
	kfree(orig_node);
	return NULL;
}

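/* purge timed out or unusable neighbors of an originator and report the
 * best remaining neighbor via best_neigh_node */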
static bool purge_orig_neighbors(struct bat_priv *bat_priv,
				 struct orig_node *orig_node,
				 struct neigh_node **best_neigh_node)
{
	struct hlist_node *node, *node_tmp;
	struct neigh_node *neigh_node;
	bool neigh_purged = false;

	*best_neigh_node = NULL;

	spin_lock_bh(&orig_node->neigh_list_lock);

	/* for all neighbors towards this originator ... */
	hlist_for_each_entry_safe(neigh_node, node, node_tmp,
				  &orig_node->neigh_list, list) {

		if ((time_after(jiffies,
			neigh_node->last_valid + PURGE_TIMEOUT * HZ)) ||
		    (neigh_node->if_incoming->if_status == IF_INACTIVE) ||
		    (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) ||
		    (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) {

			if ((neigh_node->if_incoming->if_status ==
							IF_INACTIVE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_NOT_IN_USE) ||
			    (neigh_node->if_incoming->if_status ==
							IF_TO_BE_REMOVED))
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor purge: originator %pM, "
					"neighbor: %pM, iface: %s\n",
					orig_node->orig, neigh_node->addr,
					neigh_node->if_incoming->net_dev->name);
			else
				bat_dbg(DBG_BATMAN, bat_priv,
					"neighbor timeout: originator %pM, "
					"neighbor: %pM, last_valid: %lu\n",
					orig_node->orig, neigh_node->addr,
					(neigh_node->last_valid / HZ));

			neigh_purged = true;

			hlist_del_rcu(&neigh_node->list);
			call_rcu(&neigh_node->rcu, neigh_node_free_rcu);
		} else {
			if ((!*best_neigh_node) ||
			    (neigh_node->tq_avg > (*best_neigh_node)->tq_avg))
				*best_neigh_node = neigh_node;
		}
	}

	spin_unlock_bh(&orig_node->neigh_list_lock);
	return neigh_purged;
}

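/* returns true if the originator itself timed out and has to be removed */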
static bool purge_orig_node(struct bat_priv *bat_priv,
			    struct orig_node *orig_node)
{
	struct neigh_node *best_neigh_node;

	if (time_after(jiffies,
		orig_node->last_valid + 2 * PURGE_TIMEOUT * HZ)) {

		bat_dbg(DBG_BATMAN, bat_priv,
			"Originator timeout: originator %pM, last_valid %lu\n",
			orig_node->orig, (orig_node->last_valid / HZ));
		return true;
	} else {
		if (purge_orig_neighbors(bat_priv, orig_node,
					 &best_neigh_node)) {
			update_routes(bat_priv, orig_node,
				      best_neigh_node,
				      orig_node->hna_buff,
				      orig_node->hna_buff_len);
			/* update bonding candidates, we could have lost
			 * some candidates. */
			update_bonding_candidates(orig_node);
		}
	}

	return false;
}

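/* walk the whole originator hash and purge stale originators, neighbors
 * and fragment lists */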
static void _purge_orig(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct element_t *bucket;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct orig_node *orig_node;
	int i;

	if (!hash)
		return;

	spin_lock_bh(&bat_priv->orig_hash_lock);

	/* for all origins... */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(bucket, walk, safe, head, hlist) {
			orig_node = bucket->data;

			if (purge_orig_node(bat_priv, orig_node)) {
				if (orig_node->gw_flags)
					gw_node_delete(bat_priv, orig_node);
				hlist_del_rcu(walk);
				call_rcu(&bucket->rcu, bucket_free_orig_rcu);
				continue;
			}

			if (time_after(jiffies, orig_node->last_frag_packet +
					msecs_to_jiffies(FRAG_TIMEOUT)))
				frag_list_free(&orig_node->frag_list);
		}
		spin_unlock_bh(list_lock);
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	gw_node_purge(bat_priv);
	gw_election(bat_priv);

	softif_neigh_purge(bat_priv);
}

static void purge_orig(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, orig_work);

	_purge_orig(bat_priv);
	start_purge_timer(bat_priv);
}

void purge_orig_ref(struct bat_priv *bat_priv)
{
	_purge_orig(bat_priv);
}

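/* seq_file handler: print the originator table including all potential
 * nexthops */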
int orig_seq_print_text(struct seq_file *seq, void *offset)
{
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk, *node;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node;
	int batman_count = 0;
	int last_seen_secs;
	int last_seen_msecs;
	int i;

	if ((!bat_priv->primary_if) ||
	    (bat_priv->primary_if->if_status != IF_ACTIVE)) {
		if (!bat_priv->primary_if)
			return seq_printf(seq, "BATMAN mesh %s disabled - "
					  "please specify interfaces to enable it\n",
					  net_dev->name);

		return seq_printf(seq, "BATMAN mesh %s "
				  "disabled - primary interface not active\n",
				  net_dev->name);
	}

	seq_printf(seq, "[B.A.T.M.A.N. adv %s%s, MainIF/MAC: %s/%pM (%s)]\n",
		   SOURCE_VERSION, REVISION_VERSION_STR,
		   bat_priv->primary_if->net_dev->name,
		   bat_priv->primary_if->net_dev->dev_addr, net_dev->name);
	seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n",
		   "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop",
		   "outgoingIF", "Potential nexthops");

	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			if (!orig_node->router)
				continue;

			if (orig_node->router->tq_avg == 0)
				continue;

			last_seen_secs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) / 1000;
			last_seen_msecs = jiffies_to_msecs(jiffies -
						orig_node->last_valid) % 1000;

			neigh_node = orig_node->router;
			seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:",
				   orig_node->orig, last_seen_secs,
				   last_seen_msecs, neigh_node->tq_avg,
				   neigh_node->addr,
				   neigh_node->if_incoming->net_dev->name);

			hlist_for_each_entry_rcu(neigh_node, node,
						 &orig_node->neigh_list, list) {
				seq_printf(seq, " %pM (%3i)", neigh_node->addr,
					   neigh_node->tq_avg);
			}

			seq_printf(seq, "\n");
			batman_count++;
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);

	if (batman_count == 0)
		seq_printf(seq, "No batman nodes in range ...\n");

	return 0;
}

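/* grow the per-interface bcast_own/bcast_own_sum buffers to max_if_num
 * entries */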
static int orig_node_add_if(struct orig_node *orig_node, int max_if_num)
{
	void *data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS,
			   GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own,
	       (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS);
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       (max_if_num - 1) * sizeof(uint8_t));
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

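/* a new hard interface was added: resize the counters of all originators */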
int orig_hash_add_if(struct batman_if *batman_if, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_add_if(orig_node, max_if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;

err:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return -ENOMEM;
}

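/* shrink the per-interface buffers, removing the slot of the deleted
 * interface */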
static int orig_node_del_if(struct orig_node *orig_node,
			    int max_if_num, int del_if_num)
{
	void *data_ptr = NULL;
	int chunk_size;

	/* last interface was removed */
	if (max_if_num == 0)
		goto free_bcast_own;

	chunk_size = sizeof(unsigned long) * NUM_WORDS;
	data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	/* copy first part */
	memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size);

	/* copy second part */
	memcpy(data_ptr + del_if_num * chunk_size,
	       orig_node->bcast_own + ((del_if_num + 1) * chunk_size),
	       (max_if_num - del_if_num) * chunk_size);

free_bcast_own:
	kfree(orig_node->bcast_own);
	orig_node->bcast_own = data_ptr;

	if (max_if_num == 0)
		goto free_own_sum;

	data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC);
	if (!data_ptr) {
		pr_err("Can't resize orig: out of memory\n");
		return -1;
	}

	memcpy(data_ptr, orig_node->bcast_own_sum,
	       del_if_num * sizeof(uint8_t));

	memcpy(data_ptr + del_if_num * sizeof(uint8_t),
	       orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)),
	       (max_if_num - del_if_num) * sizeof(uint8_t));

free_own_sum:
	kfree(orig_node->bcast_own_sum);
	orig_node->bcast_own_sum = data_ptr;

	return 0;
}

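/* a hard interface is removed: resize all originators and renumber the
 * remaining interfaces */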
int orig_hash_del_if(struct batman_if *batman_if, int max_if_num)
{
	struct bat_priv *bat_priv = netdev_priv(batman_if->soft_iface);
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *walk;
	struct hlist_head *head;
	struct element_t *bucket;
	struct batman_if *batman_if_tmp;
	struct orig_node *orig_node;
	int i, ret;

	/* resize all orig nodes because orig_node->bcast_own(_sum) depend on
	 * if_num */
	spin_lock_bh(&bat_priv->orig_hash_lock);

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(bucket, walk, head, hlist) {
			orig_node = bucket->data;

			spin_lock_bh(&orig_node->ogm_cnt_lock);
			ret = orig_node_del_if(orig_node, max_if_num,
					       batman_if->if_num);
			spin_unlock_bh(&orig_node->ogm_cnt_lock);

			if (ret == -1)
				goto err;
		}
		rcu_read_unlock();
	}

	/* renumber remaining batman interfaces _inside_ of orig_hash_lock */
	rcu_read_lock();
	list_for_each_entry_rcu(batman_if_tmp, &if_list, list) {
		if (batman_if_tmp->if_status == IF_NOT_IN_USE)
			continue;

		if (batman_if == batman_if_tmp)
			continue;

		if (batman_if->soft_iface != batman_if_tmp->soft_iface)
			continue;

		if (batman_if_tmp->if_num > batman_if->if_num)
			batman_if_tmp->if_num--;
	}
	rcu_read_unlock();

	batman_if->if_num = -1;
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return 0;

err:
	rcu_read_unlock();
	spin_unlock_bh(&bat_priv->orig_hash_lock);
	return -ENOMEM;
}