/*
 * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "translation-table.h"
#include "ring_buffer.h"
#include "originator.h"
#include "routing.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "hard-interface.h"
#include "send.h"
#include "bat_algo.h"

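/* allocate a neigh_node for the given neighbor address and hook it
 * into the neighbor list of orig_node */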
static struct neigh_node *bat_iv_ogm_neigh_new(struct hard_iface *hard_iface,
					       const uint8_t *neigh_addr,
					       struct orig_node *orig_node,
					       struct orig_node *orig_neigh,
					       uint32_t seqno)
{
	struct neigh_node *neigh_node;

	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, seqno);
	if (!neigh_node)
		goto out;

	INIT_LIST_HEAD(&neigh_node->bonding_list);
	spin_lock_init(&neigh_node->tq_lock);

	neigh_node->orig_node = orig_neigh;
	neigh_node->if_incoming = hard_iface;

	spin_lock_bh(&orig_node->neigh_list_lock);
	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
	spin_unlock_bh(&orig_node->neigh_list_lock);

out:
	return neigh_node;
}

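/* prepare the OGM buffer of a newly enabled interface: randomize the
 * initial seqno and preset the packet header fields */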
static int bat_iv_ogm_iface_enable(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;
	uint32_t random_seqno;
	int res = -1;

	/* randomize initial seqno to avoid collision */
	get_random_bytes(&random_seqno, sizeof(random_seqno));
	atomic_set(&hard_iface->seqno, random_seqno);

	hard_iface->packet_len = BATMAN_OGM_HLEN;
	hard_iface->packet_buff = kmalloc(hard_iface->packet_len, GFP_ATOMIC);

	if (!hard_iface->packet_buff)
		goto out;

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	batman_ogm_packet->header.packet_type = BAT_IV_OGM;
	batman_ogm_packet->header.version = COMPAT_VERSION;
	batman_ogm_packet->header.ttl = 2;
	batman_ogm_packet->flags = NO_FLAGS;
	batman_ogm_packet->tq = TQ_MAX_VALUE;
	batman_ogm_packet->tt_num_changes = 0;
	batman_ogm_packet->ttvn = 0;

	res = 0;

out:
	return res;
}

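/* release the OGM buffer when the interface is disabled */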
static void bat_iv_ogm_iface_disable(struct hard_iface *hard_iface)
{
	kfree(hard_iface->packet_buff);
	hard_iface->packet_buff = NULL;
}

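/* adjust the OGM buffer when this interface becomes the primary interface */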
static void bat_iv_ogm_primary_iface_set(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	batman_ogm_packet->flags = PRIMARIES_FIRST_HOP;
	batman_ogm_packet->header.ttl = TTL;
}

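/* copy the interface mac address into the originator and prev_sender
 * fields of the OGM buffer */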
static void bat_iv_ogm_update_mac(struct hard_iface *hard_iface)
{
	struct batman_ogm_packet *batman_ogm_packet;

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;
	memcpy(batman_ogm_packet->orig,
	       hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(batman_ogm_packet->prev_sender,
	       hard_iface->net_dev->dev_addr, ETH_ALEN);
}

/* when do we schedule our own ogm to be sent */
static unsigned long bat_iv_ogm_emit_send_time(const struct bat_priv *bat_priv)
{
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % 2*JITTER));
}

/* when do we schedule an ogm packet to be sent */
static unsigned long bat_iv_ogm_fwd_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}

/* is there another aggregated packet here? */
static int bat_iv_ogm_aggr_packet(int buff_pos, int packet_len,
				  int tt_num_changes)
{
	int next_buff_pos = buff_pos + BATMAN_OGM_HLEN + tt_len(tt_num_changes);

	return (next_buff_pos <= packet_len) &&
		(next_buff_pos <= MAX_AGGREGATION_BYTES);
}

/* send a batman ogm to a given interface */
static void bat_iv_ogm_send_to_if(struct forw_packet *forw_packet,
				  struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_ogm_packet *batman_ogm_packet;
	struct sk_buff *skb;

	if (hard_iface->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;

	/* adjust all flags and log packets */
	while (bat_iv_ogm_aggr_packet(buff_pos, forw_packet->packet_len,
				      batman_ogm_packet->tt_num_changes)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == hard_iface))
			batman_ogm_packet->flags |= DIRECTLINK;
		else
			batman_ogm_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %u, TQ %d, TTL %d, IDF %s, ttvn %d) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_ogm_packet->orig,
			ntohl(batman_ogm_packet->seqno),
			batman_ogm_packet->tq, batman_ogm_packet->header.ttl,
			(batman_ogm_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_ogm_packet->ttvn, hard_iface->net_dev->name,
			hard_iface->net_dev->dev_addr);

		buff_pos += BATMAN_OGM_HLEN +
				tt_len(batman_ogm_packet->tt_num_changes);
		packet_num++;
		batman_ogm_packet = (struct batman_ogm_packet *)
					(forw_packet->skb->data + buff_pos);
	}

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, hard_iface, broadcast_addr);
}

/* send a batman ogm packet */
static void bat_iv_ogm_emit(struct forw_packet *forw_packet)
{
	struct hard_iface *hard_iface;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct hard_iface *primary_if = NULL;
	struct batman_ogm_packet *batman_ogm_packet;
	unsigned char directlink;

	batman_ogm_packet = (struct batman_ogm_packet *)
						(forw_packet->skb->data);
	directlink = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not specified\n");
		goto out;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcasted on their interface */
	if ((directlink && (batman_ogm_packet->header.ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %u, TTL %d) on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_ogm_packet->orig,
			ntohl(batman_ogm_packet->seqno),
			batman_ogm_packet->header.ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* skb is only used once and then forw_packet is free'd */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		goto out;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		bat_iv_ogm_send_to_if(forw_packet, hard_iface);
	}
	rcu_read_unlock();

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}

/* return true if new_packet can be aggregated with forw_packet */
static bool bat_iv_ogm_can_aggregate(const struct batman_ogm_packet
							*new_batman_ogm_packet,
				     struct bat_priv *bat_priv,
				     int packet_len, unsigned long send_time,
				     bool directlink,
				     const struct hard_iface *if_incoming,
				     const struct forw_packet *forw_packet)
{
	struct batman_ogm_packet *batman_ogm_packet;
	int aggregated_bytes = forw_packet->packet_len + packet_len;
	struct hard_iface *primary_if = NULL;
	bool res = false;

	batman_ogm_packet = (struct batman_ogm_packet *)forw_packet->skb->data;

	/**
	 * we can aggregate the current packet to this aggregated packet
	 * if:
	 *
	 * - the send time is within our MAX_AGGREGATION_MS time
	 * - the resulting packet won't be bigger than
	 *   MAX_AGGREGATION_BYTES
	 */

	if (time_before(send_time, forw_packet->send_time) &&
	    time_after_eq(send_time + msecs_to_jiffies(MAX_AGGREGATION_MS),
			  forw_packet->send_time) &&
	    (aggregated_bytes <= MAX_AGGREGATION_BYTES)) {

		/**
		 * check aggregation compatibility
		 * -> direct link packets are broadcasted on
		 *    their interface only
		 * -> aggregate packet if the current packet is
		 *    a "global" packet as well as the base
		 *    packet
		 */

		primary_if = primary_if_get_selected(bat_priv);
		if (!primary_if)
			goto out;

		/* packets without direct link flag and high TTL
		 * are flooded through the net */
		if ((!directlink) &&
		    (!(batman_ogm_packet->flags & DIRECTLINK)) &&
		    (batman_ogm_packet->header.ttl != 1) &&

		    /* own packets originating non-primary
		     * interfaces leave only that interface */
		    ((!forw_packet->own) ||
		     (forw_packet->if_incoming == primary_if))) {
			res = true;
			goto out;
		}

		/* if the incoming packet is sent via this one
		 * interface only - we still can aggregate */
		if ((directlink) &&
		    (new_batman_ogm_packet->header.ttl == 1) &&
		    (forw_packet->if_incoming == if_incoming) &&

		    /* packets from direct neighbors or
		     * own secondary interface packets
		     * (= secondary interface packets in general) */
		    (batman_ogm_packet->flags & DIRECTLINK ||
		     (forw_packet->own &&
		      forw_packet->if_incoming != primary_if))) {
			res = true;
			goto out;
		}
	}

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return res;
}

/* create a new aggregated packet and add this packet to it */
static void bat_iv_ogm_aggregate_new(const unsigned char *packet_buff,
				     int packet_len, unsigned long send_time,
				     bool direct_link,
				     struct hard_iface *if_incoming,
				     int own_packet)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct forw_packet *forw_packet_aggr;
	unsigned char *skb_buff;

	if (!atomic_inc_not_zero(&if_incoming->refcount))
		return;

	/* own packet should always be scheduled */
	if (!own_packet) {
		if (!atomic_dec_not_zero(&bat_priv->batman_queue_left)) {
			bat_dbg(DBG_BATMAN, bat_priv,
				"batman packet queue full\n");
			goto out;
		}
	}

	forw_packet_aggr = kmalloc(sizeof(*forw_packet_aggr), GFP_ATOMIC);
	if (!forw_packet_aggr) {
		if (!own_packet)
			atomic_inc(&bat_priv->batman_queue_left);
		goto out;
	}

	if ((atomic_read(&bat_priv->aggregated_ogms)) &&
	    (packet_len < MAX_AGGREGATION_BYTES))
		forw_packet_aggr->skb = dev_alloc_skb(MAX_AGGREGATION_BYTES +
						      ETH_HLEN);
	else
		forw_packet_aggr->skb = dev_alloc_skb(packet_len + ETH_HLEN);

	if (!forw_packet_aggr->skb) {
		if (!own_packet)
			atomic_inc(&bat_priv->batman_queue_left);
		kfree(forw_packet_aggr);
		goto out;
	}
	skb_reserve(forw_packet_aggr->skb, ETH_HLEN);

	INIT_HLIST_NODE(&forw_packet_aggr->list);

	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
	forw_packet_aggr->packet_len = packet_len;
	memcpy(skb_buff, packet_buff, packet_len);

	forw_packet_aggr->own = own_packet;
	forw_packet_aggr->if_incoming = if_incoming;
	forw_packet_aggr->num_packets = 0;
	forw_packet_aggr->direct_link_flags = NO_FLAGS;
	forw_packet_aggr->send_time = send_time;

	/* save packet direct link flag status */
	if (direct_link)
		forw_packet_aggr->direct_link_flags |= 1;

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_add_head(&forw_packet_aggr->list, &bat_priv->forw_bat_list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet_aggr->delayed_work,
			  send_outstanding_bat_ogm_packet);
	queue_delayed_work(bat_event_workqueue,
			   &forw_packet_aggr->delayed_work,
			   send_time - jiffies);

	return;
out:
	hardif_free_ref(if_incoming);
}

/* aggregate a new packet into the existing ogm packet */
static void bat_iv_ogm_aggregate(struct forw_packet *forw_packet_aggr,
				 const unsigned char *packet_buff,
				 int packet_len, bool direct_link)
{
	unsigned char *skb_buff;

	skb_buff = skb_put(forw_packet_aggr->skb, packet_len);
	memcpy(skb_buff, packet_buff, packet_len);
	forw_packet_aggr->packet_len += packet_len;
	forw_packet_aggr->num_packets++;

	/* save packet direct link flag status */
	if (direct_link)
		forw_packet_aggr->direct_link_flags |=
			(1 << forw_packet_aggr->num_packets);
}

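/* queue an ogm for sending - aggregate it with a queued packet if
 * possible, otherwise schedule it as a new aggregation packet */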
static void bat_iv_ogm_queue_add(struct bat_priv *bat_priv,
				 unsigned char *packet_buff,
				 int packet_len, struct hard_iface *if_incoming,
				 int own_packet, unsigned long send_time)
{
	/**
	 * _aggr -> pointer to the packet we want to aggregate with
	 * _pos -> pointer to the position in the queue
	 */
	struct forw_packet *forw_packet_aggr = NULL, *forw_packet_pos = NULL;
	struct hlist_node *tmp_node;
	struct batman_ogm_packet *batman_ogm_packet;
	bool direct_link;

	batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;
	direct_link = batman_ogm_packet->flags & DIRECTLINK ? 1 : 0;

	/* find position for the packet in the forward queue */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	/* own packets are not to be aggregated */
	if ((atomic_read(&bat_priv->aggregated_ogms)) && (!own_packet)) {
		hlist_for_each_entry(forw_packet_pos, tmp_node,
				     &bat_priv->forw_bat_list, list) {
			if (bat_iv_ogm_can_aggregate(batman_ogm_packet,
						     bat_priv, packet_len,
						     send_time, direct_link,
						     if_incoming,
						     forw_packet_pos)) {
				forw_packet_aggr = forw_packet_pos;
				break;
			}
		}
	}

	/* nothing to aggregate with - either aggregation disabled or no
	 * suitable aggregation packet found */
	if (!forw_packet_aggr) {
		/* the following section can run without the lock */
		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * if we could not aggregate this packet with one of the others
		 * we hold it back for a while, so that it might be aggregated
		 * later on
		 */
		if ((!own_packet) &&
		    (atomic_read(&bat_priv->aggregated_ogms)))
			send_time += msecs_to_jiffies(MAX_AGGREGATION_MS);

		bat_iv_ogm_aggregate_new(packet_buff, packet_len,
					 send_time, direct_link,
					 if_incoming, own_packet);
	} else {
		bat_iv_ogm_aggregate(forw_packet_aggr, packet_buff,
				     packet_len, direct_link);
		spin_unlock_bh(&bat_priv->forw_bat_list_lock);
	}
}

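/* forward a received ogm: refresh tq/ttl, apply the hop penalty and
 * queue the rebroadcast */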
static void bat_iv_ogm_forward(struct orig_node *orig_node,
			       const struct ethhdr *ethhdr,
			       struct batman_ogm_packet *batman_ogm_packet,
			       bool is_single_hop_neigh,
			       struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *router;
	uint8_t in_tq, in_ttl, tq_avg = 0;
	uint8_t tt_num_changes;

	if (batman_ogm_packet->header.ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	router = orig_node_get_router(orig_node);

	in_tq = batman_ogm_packet->tq;
	in_ttl = batman_ogm_packet->header.ttl;
	tt_num_changes = batman_ogm_packet->tt_num_changes;

	batman_ogm_packet->header.ttl--;
	memcpy(batman_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
	 * of our best tq value */
	if (router && router->tq_avg != 0) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_eth(router->addr, ethhdr->h_source)) {
			batman_ogm_packet->tq = router->tq_avg;

			if (router->last_ttl)
				batman_ogm_packet->header.ttl =
					router->last_ttl - 1;
		}

		tq_avg = router->tq_avg;
	}

	if (router)
		neigh_node_free_ref(router);

	/* apply hop penalty */
	batman_ogm_packet->tq = hop_penalty(batman_ogm_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_ogm_packet->tq, in_ttl - 1,
		batman_ogm_packet->header.ttl);

	batman_ogm_packet->seqno = htonl(batman_ogm_packet->seqno);
	batman_ogm_packet->tt_crc = htons(batman_ogm_packet->tt_crc);

	/* switch off primaries first hop flag when forwarding */
	batman_ogm_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (is_single_hop_neigh)
		batman_ogm_packet->flags |= DIRECTLINK;
	else
		batman_ogm_packet->flags &= ~DIRECTLINK;

	bat_iv_ogm_queue_add(bat_priv, (unsigned char *)batman_ogm_packet,
			     BATMAN_OGM_HLEN + tt_len(tt_num_changes),
			     if_incoming, 0, bat_iv_ogm_fwd_send_time());
}

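/* prepare our own ogm (seqno, ttvn, tt crc, vis and gateway flags) and
 * queue it for transmission */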
static void bat_iv_ogm_schedule(struct hard_iface *hard_iface,
				int tt_num_changes)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct batman_ogm_packet *batman_ogm_packet;
	struct hard_iface *primary_if;
	int vis_server;

	vis_server = atomic_read(&bat_priv->vis_mode);
	primary_if = primary_if_get_selected(bat_priv);

	batman_ogm_packet = (struct batman_ogm_packet *)hard_iface->packet_buff;

	/* change sequence number to network order */
	batman_ogm_packet->seqno =
		htonl((uint32_t)atomic_read(&hard_iface->seqno));

	batman_ogm_packet->ttvn = atomic_read(&bat_priv->ttvn);
	batman_ogm_packet->tt_crc = htons((uint16_t)
						atomic_read(&bat_priv->tt_crc));
	if (tt_num_changes >= 0)
		batman_ogm_packet->tt_num_changes = tt_num_changes;

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_ogm_packet->flags |= VIS_SERVER;
	else
		batman_ogm_packet->flags &= ~VIS_SERVER;

	if ((hard_iface == primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_ogm_packet->gw_flags =
				(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_ogm_packet->gw_flags = NO_FLAGS;

	atomic_inc(&hard_iface->seqno);

	slide_own_bcast_window(hard_iface);
	bat_iv_ogm_queue_add(bat_priv, hard_iface->packet_buff,
			     hard_iface->packet_len, hard_iface, 1,
			     bat_iv_ogm_emit_send_time(bat_priv));

	if (primary_if)
		hardif_free_ref(primary_if);
}

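/* update the neighbor ranking and, if a better next hop was found,
 * the route towards the originator of the received ogm */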
static void bat_iv_ogm_orig_update(struct bat_priv *bat_priv,
				   struct orig_node *orig_node,
				   const struct ethhdr *ethhdr,
				   const struct batman_ogm_packet
							*batman_ogm_packet,
				   struct hard_iface *if_incoming,
				   const unsigned char *tt_buff,
				   int is_duplicate)
{
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node = NULL;
	struct neigh_node *router = NULL;
	struct orig_node *orig_node_tmp;
	struct hlist_node *node;
	uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;

	bat_dbg(DBG_BATMAN, bat_priv,
		"update_originator(): Searching and updating originator entry of received packet\n");

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {
		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming) &&
		     atomic_inc_not_zero(&tmp_neigh_node->refcount)) {
			if (neigh_node)
				neigh_node_free_ref(neigh_node);
			neigh_node = tmp_neigh_node;
			continue;
		}

		if (is_duplicate)
			continue;

		spin_lock_bh(&tmp_neigh_node->tq_lock);
		ring_buffer_set(tmp_neigh_node->tq_recv,
				&tmp_neigh_node->tq_index, 0);
		tmp_neigh_node->tq_avg =
			ring_buffer_avg(tmp_neigh_node->tq_recv);
		spin_unlock_bh(&tmp_neigh_node->tq_lock);
	}

	if (!neigh_node) {
		struct orig_node *orig_tmp;

		orig_tmp = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_tmp)
			goto unlock;

		neigh_node = bat_iv_ogm_neigh_new(if_incoming, ethhdr->h_source,
						  orig_node, orig_tmp,
						  batman_ogm_packet->seqno);

		orig_node_free_ref(orig_tmp);
		if (!neigh_node)
			goto unlock;
	} else
		bat_dbg(DBG_BATMAN, bat_priv,
			"Updating existing last-hop neighbor of originator\n");

	rcu_read_unlock();

	orig_node->flags = batman_ogm_packet->flags;
	neigh_node->last_seen = jiffies;

	spin_lock_bh(&neigh_node->tq_lock);
	ring_buffer_set(neigh_node->tq_recv,
			&neigh_node->tq_index,
			batman_ogm_packet->tq);
	neigh_node->tq_avg = ring_buffer_avg(neigh_node->tq_recv);
	spin_unlock_bh(&neigh_node->tq_lock);

	if (!is_duplicate) {
		orig_node->last_ttl = batman_ogm_packet->header.ttl;
		neigh_node->last_ttl = batman_ogm_packet->header.ttl;
	}

	bonding_candidate_add(orig_node, neigh_node);

	/* if this neighbor already is our next hop there is nothing
	 * to change */
	router = orig_node_get_router(orig_node);
	if (router == neigh_node)
		goto update_tt;

	/* if this neighbor does not offer a better TQ we won't consider it */
	if (router && (router->tq_avg > neigh_node->tq_avg))
		goto update_tt;

	/* if the TQ is the same and the link not more symmetric we
	 * won't consider it either */
	if (router && (neigh_node->tq_avg == router->tq_avg)) {
		orig_node_tmp = router->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_orig =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		orig_node_tmp = neigh_node->orig_node;
		spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
		bcast_own_sum_neigh =
			orig_node_tmp->bcast_own_sum[if_incoming->if_num];
		spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);

		if (bcast_own_sum_orig >= bcast_own_sum_neigh)
			goto update_tt;
	}

	update_route(bat_priv, orig_node, neigh_node);

update_tt:
	/* I have to check for transtable changes only if the OGM has been
	 * sent through a primary interface */
	if (((batman_ogm_packet->orig != ethhdr->h_source) &&
	     (batman_ogm_packet->header.ttl > 2)) ||
	    (batman_ogm_packet->flags & PRIMARIES_FIRST_HOP))
		tt_update_orig(bat_priv, orig_node, tt_buff,
			       batman_ogm_packet->tt_num_changes,
			       batman_ogm_packet->ttvn,
			       batman_ogm_packet->tt_crc);

	if (orig_node->gw_flags != batman_ogm_packet->gw_flags)
		gw_node_update(bat_priv, orig_node,
			       batman_ogm_packet->gw_flags);

	orig_node->gw_flags = batman_ogm_packet->gw_flags;

	/* restart gateway selection if fast or late switching was enabled */
	if ((orig_node->gw_flags) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_CLIENT) &&
	    (atomic_read(&bat_priv->gw_sel_class) > 2))
		gw_check_election(bat_priv, orig_node);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (router)
		neigh_node_free_ref(router);
}

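/* calculate the resulting tq of a received ogm from the local link
 * quality and the asymmetric link penalty; returns 1 if the link is
 * considered bidirectional */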
static int bat_iv_ogm_calc_tq(struct orig_node *orig_node,
			      struct orig_node *orig_neigh_node,
			      struct batman_ogm_packet *batman_ogm_packet,
			      struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *neigh_node = NULL, *tmp_neigh_node;
	struct hlist_node *node;
	uint8_t total_count;
	uint8_t orig_eq_count, neigh_rq_count, tq_own;
	int tq_asym_penalty, ret = 0;

	/* find corresponding one hop neighbor */
	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_neigh_node->neigh_list, list) {

		if (!compare_eth(tmp_neigh_node->addr, orig_neigh_node->orig))
			continue;

		if (tmp_neigh_node->if_incoming != if_incoming)
			continue;

		if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
			continue;

		neigh_node = tmp_neigh_node;
		break;
	}
	rcu_read_unlock();

	if (!neigh_node)
		neigh_node = bat_iv_ogm_neigh_new(if_incoming,
						  orig_neigh_node->orig,
						  orig_neigh_node,
						  orig_neigh_node,
						  batman_ogm_packet->seqno);

	if (!neigh_node)
		goto out;

	/* if orig_node is direct neighbor update neigh_node last_seen */
	if (orig_node == orig_neigh_node)
		neigh_node->last_seen = jiffies;

	orig_node->last_seen = jiffies;

	/* find packet count of corresponding one hop neighbor */
	spin_lock_bh(&orig_node->ogm_cnt_lock);
	orig_eq_count = orig_neigh_node->bcast_own_sum[if_incoming->if_num];
	neigh_rq_count = neigh_node->real_packet_count;
	spin_unlock_bh(&orig_node->ogm_cnt_lock);

	/* pay attention to not get a value bigger than 100 % */
	total_count = (orig_eq_count > neigh_rq_count ?
		       neigh_rq_count : orig_eq_count);

	/* if we have too few packets (too little data) we set tq_own to zero */
	/* if we receive too few packets it is not considered bidirectional */
	if ((total_count < TQ_LOCAL_BIDRECT_SEND_MINIMUM) ||
	    (neigh_rq_count < TQ_LOCAL_BIDRECT_RECV_MINIMUM))
		tq_own = 0;
	else
		/* neigh_node->real_packet_count is never zero as we
		 * only purge old information when getting new
		 * information */
		tq_own = (TQ_MAX_VALUE * total_count) / neigh_rq_count;

	/* 1 - ((1-x) ** 3), normalized to TQ_MAX_VALUE this does
	 * affect the nearly-symmetric links only a little, but
	 * punishes asymmetric links more. This will give a value
	 * between 0 and TQ_MAX_VALUE
	 */
	tq_asym_penalty = TQ_MAX_VALUE - (TQ_MAX_VALUE *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count) *
				(TQ_LOCAL_WINDOW_SIZE - neigh_rq_count)) /
					(TQ_LOCAL_WINDOW_SIZE *
					 TQ_LOCAL_WINDOW_SIZE *
					 TQ_LOCAL_WINDOW_SIZE);

	batman_ogm_packet->tq = ((batman_ogm_packet->tq * tq_own
						* tq_asym_penalty) /
					(TQ_MAX_VALUE * TQ_MAX_VALUE));

	bat_dbg(DBG_BATMAN, bat_priv,
		"bidirectional: orig = %-15pM neigh = %-15pM => own_bcast = %2i, real recv = %2i, local tq: %3i, asym_penalty: %3i, total tq: %3i\n",
		orig_node->orig, orig_neigh_node->orig, total_count,
		neigh_rq_count, tq_own, tq_asym_penalty, batman_ogm_packet->tq);

	/* if link has the minimum required transmission quality
	 * consider it bidirectional */
	if (batman_ogm_packet->tq >= TQ_TOTAL_BIDRECT_LIMIT)
		ret = 1;

out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	return ret;
}

/* processes a batman packet for all interfaces, adjusts the sequence number and
 * finds out whether it is a duplicate.
 * returns:
 *   1 the packet is a duplicate
 *   0 the packet has not yet been received
 *  -1 the packet is old and has been received while the seqno window
 *     was protected. Caller should drop it.
 */
static int bat_iv_ogm_update_seqnos(const struct ethhdr *ethhdr,
				    const struct batman_ogm_packet
							*batman_ogm_packet,
				    const struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct orig_node *orig_node;
	struct neigh_node *tmp_neigh_node;
	struct hlist_node *node;
	int is_duplicate = 0;
	int32_t seq_diff;
	int need_update = 0;
	int set_mark, ret = -1;

	orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
	if (!orig_node)
		return 0;

	spin_lock_bh(&orig_node->ogm_cnt_lock);
	seq_diff = batman_ogm_packet->seqno - orig_node->last_real_seqno;

	/* signalize caller that the packet is to be dropped. */
	if (!hlist_empty(&orig_node->neigh_list) &&
	    window_protected(bat_priv, seq_diff,
			     &orig_node->batman_seqno_reset))
		goto out;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_neigh_node, node,
				 &orig_node->neigh_list, list) {

		is_duplicate |= bat_test_bit(tmp_neigh_node->real_bits,
					     orig_node->last_real_seqno,
					     batman_ogm_packet->seqno);

		if (compare_eth(tmp_neigh_node->addr, ethhdr->h_source) &&
		    (tmp_neigh_node->if_incoming == if_incoming))
			set_mark = 1;
		else
			set_mark = 0;

		/* if the window moved, set the update flag. */
		need_update |= bit_get_packet(bat_priv,
					      tmp_neigh_node->real_bits,
					      seq_diff, set_mark);

		tmp_neigh_node->real_packet_count =
			bitmap_weight(tmp_neigh_node->real_bits,
				      TQ_LOCAL_WINDOW_SIZE);
	}
	rcu_read_unlock();

	if (need_update) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"updating last_seqno: old %u, new %u\n",
			orig_node->last_real_seqno, batman_ogm_packet->seqno);
		orig_node->last_real_seqno = batman_ogm_packet->seqno;
	}

	ret = is_duplicate;

out:
	spin_unlock_bh(&orig_node->ogm_cnt_lock);
	orig_node_free_ref(orig_node);
	return ret;
}

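/* validate a received ogm and decide whether it updates our ranking
 * and/or gets rebroadcasted */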
static void bat_iv_ogm_process(const struct ethhdr *ethhdr,
			       struct batman_ogm_packet *batman_ogm_packet,
			       const unsigned char *tt_buff,
			       struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct hard_iface *hard_iface;
	struct orig_node *orig_neigh_node, *orig_node;
	struct neigh_node *router = NULL, *router_router = NULL;
	struct neigh_node *orig_neigh_router = NULL;
	int has_directlink_flag;
	int is_my_addr = 0, is_my_orig = 0, is_my_oldorig = 0;
	int is_broadcast = 0, is_bidirectional;
	bool is_single_hop_neigh = false;
	int is_duplicate;
	uint32_t if_incoming_seqno;

	/* Silently drop when the batman packet is actually not a
	 * correct packet.
	 *
	 * This might happen if a packet is padded (e.g. Ethernet has a
	 * minimum frame length of 64 byte) and the aggregation interprets
	 * it as an additional length.
	 *
	 * TODO: A more sane solution would be to have a bit in the
	 * batman_ogm_packet to detect whether the packet is the last
	 * packet in an aggregation.  Here we expect that the padding
	 * is always zero (or not 0x01)
	 */
	if (batman_ogm_packet->header.packet_type != BAT_IV_OGM)
		return;

	/* could be changed by schedule_own_packet() */
	if_incoming_seqno = atomic_read(&if_incoming->seqno);

	has_directlink_flag = (batman_ogm_packet->flags & DIRECTLINK ? 1 : 0);

	if (compare_eth(ethhdr->h_source, batman_ogm_packet->orig))
		is_single_hop_neigh = true;

	bat_dbg(DBG_BATMAN, bat_priv,
		"Received BATMAN packet via NB: %pM, IF: %s [%pM] (from OG: %pM, via prev OG: %pM, seqno %u, ttvn %u, crc %u, changes %u, td %d, TTL %d, V %d, IDF %d)\n",
		ethhdr->h_source, if_incoming->net_dev->name,
		if_incoming->net_dev->dev_addr, batman_ogm_packet->orig,
		batman_ogm_packet->prev_sender, batman_ogm_packet->seqno,
		batman_ogm_packet->ttvn, batman_ogm_packet->tt_crc,
		batman_ogm_packet->tt_num_changes, batman_ogm_packet->tq,
		batman_ogm_packet->header.ttl,
		batman_ogm_packet->header.version, has_directlink_flag);

	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->if_status != IF_ACTIVE)
			continue;

		if (hard_iface->soft_iface != if_incoming->soft_iface)
			continue;

		if (compare_eth(ethhdr->h_source,
				hard_iface->net_dev->dev_addr))
			is_my_addr = 1;

		if (compare_eth(batman_ogm_packet->orig,
				hard_iface->net_dev->dev_addr))
			is_my_orig = 1;

		if (compare_eth(batman_ogm_packet->prev_sender,
				hard_iface->net_dev->dev_addr))
			is_my_oldorig = 1;

		if (is_broadcast_ether_addr(ethhdr->h_source))
			is_broadcast = 1;
	}
	rcu_read_unlock();

	if (batman_ogm_packet->header.version != COMPAT_VERSION) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: incompatible batman version (%i)\n",
			batman_ogm_packet->header.version);
		return;
	}

	if (is_my_addr) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: received my own broadcast (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	if (is_broadcast) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all packets with broadcast source addr (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	if (is_my_orig) {
		unsigned long *word;
		int offset;

		orig_neigh_node = get_orig_node(bat_priv, ethhdr->h_source);
		if (!orig_neigh_node)
			return;

		/* neighbor has to indicate direct link and it has to
		 * come via the corresponding interface */
		/* save packet seqno for bidirectional check */
		if (has_directlink_flag &&
		    compare_eth(if_incoming->net_dev->dev_addr,
				batman_ogm_packet->orig)) {
			offset = if_incoming->if_num * NUM_WORDS;

			spin_lock_bh(&orig_neigh_node->ogm_cnt_lock);
			word = &(orig_neigh_node->bcast_own[offset]);
			bat_set_bit(word,
				    if_incoming_seqno -
						batman_ogm_packet->seqno - 2);
			orig_neigh_node->bcast_own_sum[if_incoming->if_num] =
				bitmap_weight(word, TQ_LOCAL_WINDOW_SIZE);
			spin_unlock_bh(&orig_neigh_node->ogm_cnt_lock);
		}

		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet from myself (via neighbor)\n");
		orig_node_free_ref(orig_neigh_node);
		return;
	}

	if (is_my_oldorig) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast echos (sender: %pM)\n",
			ethhdr->h_source);
		return;
	}

	orig_node = get_orig_node(bat_priv, batman_ogm_packet->orig);
	if (!orig_node)
		return;

	is_duplicate = bat_iv_ogm_update_seqnos(ethhdr, batman_ogm_packet,
						if_incoming);

	if (is_duplicate == -1) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: packet within seqno protection time (sender: %pM)\n",
			ethhdr->h_source);
		goto out;
	}

	if (batman_ogm_packet->tq == 0) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: originator packet with tq equal 0\n");
		goto out;
	}

	router = orig_node_get_router(orig_node);
	if (router)
		router_router = orig_node_get_router(router->orig_node);

	/* avoid temporary routing loops */
	if (router && router_router &&
	    (compare_eth(router->addr, batman_ogm_packet->prev_sender)) &&
	    !(compare_eth(batman_ogm_packet->orig,
			  batman_ogm_packet->prev_sender)) &&
	    (compare_eth(router->addr, router_router->addr))) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: ignoring all rebroadcast packets that may make me loop (sender: %pM)\n",
			ethhdr->h_source);
		goto out;
	}

	/* if sender is a direct neighbor the sender mac equals
	 * originator mac */
	orig_neigh_node = (is_single_hop_neigh ?
			   orig_node :
			   get_orig_node(bat_priv, ethhdr->h_source));
	if (!orig_neigh_node)
		goto out;

	orig_neigh_router = orig_node_get_router(orig_neigh_node);

	/* drop packet if sender is not a direct neighbor and if we
	 * don't route towards it */
	if (!is_single_hop_neigh && (!orig_neigh_router)) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: OGM via unknown neighbor!\n");
		goto out_neigh;
	}

	is_bidirectional = bat_iv_ogm_calc_tq(orig_node, orig_neigh_node,
					      batman_ogm_packet, if_incoming);

	bonding_save_primary(orig_node, orig_neigh_node, batman_ogm_packet);

	/* update ranking if it is not a duplicate or has the same
	 * seqno and similar ttl as the non-duplicate */
	if (is_bidirectional &&
	    (!is_duplicate ||
	     ((orig_node->last_real_seqno == batman_ogm_packet->seqno) &&
	      (orig_node->last_ttl - 3 <= batman_ogm_packet->header.ttl))))
		bat_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
				       batman_ogm_packet, if_incoming,
				       tt_buff, is_duplicate);

	/* is single hop (direct) neighbor */
	if (is_single_hop_neigh) {

		/* mark direct link on incoming interface */
		bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
				   is_single_hop_neigh, if_incoming);

		bat_dbg(DBG_BATMAN, bat_priv,
			"Forwarding packet: rebroadcast neighbor packet with direct link flag\n");
		goto out_neigh;
	}

	/* multihop originator */
	if (!is_bidirectional) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: not received via bidirectional link\n");
		goto out_neigh;
	}

	if (is_duplicate) {
		bat_dbg(DBG_BATMAN, bat_priv,
			"Drop packet: duplicate packet received\n");
		goto out_neigh;
	}

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: rebroadcast originator packet\n");
	bat_iv_ogm_forward(orig_node, ethhdr, batman_ogm_packet,
			   is_single_hop_neigh, if_incoming);

out_neigh:
	if ((orig_neigh_node) && (!is_single_hop_neigh))
		orig_node_free_ref(orig_neigh_node);
out:
	if (router)
		neigh_node_free_ref(router);
	if (router_router)
		neigh_node_free_ref(router_router);
	if (orig_neigh_router)
		neigh_node_free_ref(orig_neigh_router);

	orig_node_free_ref(orig_node);
}

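/* receive handler for ogm packets - unpacks aggregated ogms and
 * processes them one by one */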
static int bat_iv_ogm_receive(struct sk_buff *skb,
			      struct hard_iface *if_incoming)
{
	struct batman_ogm_packet *batman_ogm_packet;
	struct ethhdr *ethhdr;
	int buff_pos = 0, packet_len;
	unsigned char *tt_buff, *packet_buff;
	bool ret;

	ret = check_management_packet(skb, if_incoming, BATMAN_OGM_HLEN);
	if (!ret)
		return NET_RX_DROP;

	packet_len = skb_headlen(skb);
	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	packet_buff = skb->data;
	batman_ogm_packet = (struct batman_ogm_packet *)packet_buff;

	/* unpack the aggregated packets and process them one by one */
	do {
		/* network to host order for our 32bit seqno and the
		   orig_interval */
		batman_ogm_packet->seqno = ntohl(batman_ogm_packet->seqno);
		batman_ogm_packet->tt_crc = ntohs(batman_ogm_packet->tt_crc);

		tt_buff = packet_buff + buff_pos + BATMAN_OGM_HLEN;

		bat_iv_ogm_process(ethhdr, batman_ogm_packet,
				   tt_buff, if_incoming);

		buff_pos += BATMAN_OGM_HLEN +
				tt_len(batman_ogm_packet->tt_num_changes);

		batman_ogm_packet = (struct batman_ogm_packet *)
						(packet_buff + buff_pos);
	} while (bat_iv_ogm_aggr_packet(buff_pos, packet_len,
					batman_ogm_packet->tt_num_changes));

	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static struct bat_algo_ops batman_iv __read_mostly = {
	.name = "BATMAN IV",
	.bat_iface_enable = bat_iv_ogm_iface_enable,
	.bat_iface_disable = bat_iv_ogm_iface_disable,
	.bat_primary_iface_set = bat_iv_ogm_primary_iface_set,
	.bat_ogm_update_mac = bat_iv_ogm_update_mac,
	.bat_ogm_schedule = bat_iv_ogm_schedule,
	.bat_ogm_emit = bat_iv_ogm_emit,
};

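/* register the ogm receive handler and the B.A.T.M.A.N. IV algorithm ops */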
int __init bat_iv_init(void)
{
	int ret;

	/* batman originator packet */
	ret = recv_handler_register(BAT_IV_OGM, bat_iv_ogm_receive);
	if (ret < 0)
		goto out;

	ret = bat_algo_register(&batman_iv);
	if (ret < 0)
		goto handler_unregister;

	goto out;

handler_unregister:
	recv_handler_unregister(BAT_IV_OGM);
out:
	return ret;
}