/*
 * Copyright (C) 2007-2011 B.A.T.M.A.N. contributors:
 *
 * Marek Lindner, Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "routing.h"
#include "translation-table.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "vis.h"
#include "aggregation.h"
#include "gateway_common.h"
#include "originator.h"

static void send_outstanding_bcast_packet(struct work_struct *work);

/* apply hop penalty for a normal link */
static uint8_t hop_penalty(uint8_t tq, const struct bat_priv *bat_priv)
{
	int hop_penalty = atomic_read(&bat_priv->hop_penalty);
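
	/* tq is scaled by (TQ_MAX_VALUE - hop_penalty) / TQ_MAX_VALUE on
	 * each hop; e.g. a hop_penalty of 10 with TQ_MAX_VALUE 255 turns
	 * a tq of 255 into 245 */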
	return (tq * (TQ_MAX_VALUE - hop_penalty)) / (TQ_MAX_VALUE);
}

/* when do we schedule our own packet to be sent */
static unsigned long own_send_time(const struct bat_priv *bat_priv)
{
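	/* randomize the send time within +-JITTER msecs around the
	 * configured originator interval so that the OGMs of neighboring
	 * nodes do not stay synchronized */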
	return jiffies + msecs_to_jiffies(
		   atomic_read(&bat_priv->orig_interval) -
		   JITTER + (random32() % (2 * JITTER)));
}

/* when do we schedule a forwarded packet to be sent */
static unsigned long forward_send_time(void)
{
	return jiffies + msecs_to_jiffies(random32() % (JITTER/2));
}

/* send out an already prepared packet to the given address via the
 * specified batman interface */
int send_skb_packet(struct sk_buff *skb, struct hard_iface *hard_iface,
		    const uint8_t *dst_addr)
{
	struct ethhdr *ethhdr;

	if (hard_iface->if_status != IF_ACTIVE)
		goto send_skb_err;

	if (unlikely(!hard_iface->net_dev))
		goto send_skb_err;

	if (!(hard_iface->net_dev->flags & IFF_UP)) {
		pr_warning("Interface %s is not up - can't send packet via "
			   "that interface!\n", hard_iface->net_dev->name);
		goto send_skb_err;
	}

	/* push to the ethernet header. */
	if (my_skb_head_push(skb, sizeof(*ethhdr)) < 0)
		goto send_skb_err;

	skb_reset_mac_header(skb);

	ethhdr = (struct ethhdr *)skb_mac_header(skb);
	memcpy(ethhdr->h_source, hard_iface->net_dev->dev_addr, ETH_ALEN);
	memcpy(ethhdr->h_dest, dst_addr, ETH_ALEN);
	ethhdr->h_proto = __constant_htons(ETH_P_BATMAN);

	skb_set_network_header(skb, ETH_HLEN);
	skb->priority = TC_PRIO_CONTROL;
	skb->protocol = __constant_htons(ETH_P_BATMAN);

	skb->dev = hard_iface->net_dev;

	/* dev_queue_xmit() returns a negative result on error. However on
	 * congestion and traffic shaping, it drops and returns NET_XMIT_DROP
	 * (which is > 0). This will not be treated as an error. */

	return dev_queue_xmit(skb);
send_skb_err:
	kfree_skb(skb);
	return NET_XMIT_DROP;
}

/* Send a packet to a given interface */
static void send_packet_to_if(struct forw_packet *forw_packet,
			      struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	char *fwd_str;
	uint8_t packet_num;
	int16_t buff_pos;
	struct batman_packet *batman_packet;
	struct sk_buff *skb;

	if (hard_iface->if_status != IF_ACTIVE)
		return;

	packet_num = 0;
	buff_pos = 0;
	batman_packet = (struct batman_packet *)forw_packet->skb->data;

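	/* the skb may carry several aggregated OGMs; aggregated_packet()
	 * checks whether another complete batman packet (plus its tt
	 * change buffer) starts at buff_pos */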
	/* adjust all flags and log packets */
	while (aggregated_packet(buff_pos,
				 forw_packet->packet_len,
				 batman_packet->tt_num_changes)) {

		/* we might have aggregated direct link packets with an
		 * ordinary base packet */
		if ((forw_packet->direct_link_flags & (1 << packet_num)) &&
		    (forw_packet->if_incoming == hard_iface))
			batman_packet->flags |= DIRECTLINK;
		else
			batman_packet->flags &= ~DIRECTLINK;

		fwd_str = (packet_num > 0 ? "Forwarding" : (forw_packet->own ?
							    "Sending own" :
							    "Forwarding"));
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s %spacket (originator %pM, seqno %d, TQ %d, TTL %d,"
			" IDF %s, hvn %d) on interface %s [%pM]\n",
			fwd_str, (packet_num > 0 ? "aggregated " : ""),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->tq, batman_packet->ttl,
			(batman_packet->flags & DIRECTLINK ?
			 "on" : "off"),
			batman_packet->ttvn, hard_iface->net_dev->name,
			hard_iface->net_dev->dev_addr);

		buff_pos += sizeof(*batman_packet) +
			tt_len(batman_packet->tt_num_changes);
		packet_num++;
		batman_packet = (struct batman_packet *)
			(forw_packet->skb->data + buff_pos);
	}

	/* create clone because function is called more than once */
	skb = skb_clone(forw_packet->skb, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, hard_iface, broadcast_addr);
}

/* send a batman packet */
static void send_packet(struct forw_packet *forw_packet)
{
	struct hard_iface *hard_iface;
	struct net_device *soft_iface;
	struct bat_priv *bat_priv;
	struct hard_iface *primary_if = NULL;
	struct batman_packet *batman_packet =
		(struct batman_packet *)(forw_packet->skb->data);
	int directlink = (batman_packet->flags & DIRECTLINK ? 1 : 0);

	if (!forw_packet->if_incoming) {
		pr_err("Error - can't forward packet: incoming iface not "
		       "specified\n");
		goto out;
	}

	soft_iface = forw_packet->if_incoming->soft_iface;
	bat_priv = netdev_priv(soft_iface);

	if (forw_packet->if_incoming->if_status != IF_ACTIVE)
		goto out;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* multihomed peer assumed */
	/* non-primary OGMs are only broadcast on their own interface */
	if ((directlink && (batman_packet->ttl == 1)) ||
	    (forw_packet->own && (forw_packet->if_incoming != primary_if))) {

		/* FIXME: what about aggregated packets ? */
		bat_dbg(DBG_BATMAN, bat_priv,
			"%s packet (originator %pM, seqno %d, TTL %d) "
			"on interface %s [%pM]\n",
			(forw_packet->own ? "Sending own" : "Forwarding"),
			batman_packet->orig, ntohl(batman_packet->seqno),
			batman_packet->ttl,
			forw_packet->if_incoming->net_dev->name,
			forw_packet->if_incoming->net_dev->dev_addr);

		/* the skb is only used once and then forw_packet is freed */
		send_skb_packet(forw_packet->skb, forw_packet->if_incoming,
				broadcast_addr);
		forw_packet->skb = NULL;

		goto out;
	}

	/* broadcast on every interface */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		send_packet_to_if(forw_packet, hard_iface);
	}
	rcu_read_unlock();

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}

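/* resize hard_iface's OGM packet buffer to new_len; only the static
 * batman_packet header is carried over, and the old buffer is kept if
 * the allocation fails */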
static void realloc_packet_buffer(struct hard_iface *hard_iface,
				  int new_len)
{
	unsigned char *new_buff;
	struct batman_packet *batman_packet;

	new_buff = kmalloc(new_len, GFP_ATOMIC);

	/* keep old buffer if kmalloc should fail */
	if (new_buff) {
		memcpy(new_buff, hard_iface->packet_buff,
		       sizeof(*batman_packet));

		kfree(hard_iface->packet_buff);
		hard_iface->packet_buff = new_buff;
		hard_iface->packet_len = new_len;
	}
}

/* when calling this function (hard_iface == primary_if) has to be true */
static void prepare_packet_buffer(struct bat_priv *bat_priv,
				  struct hard_iface *hard_iface)
{
	int new_len;
	struct batman_packet *batman_packet;

	new_len = BAT_PACKET_LEN +
		  tt_len((uint8_t)atomic_read(&bat_priv->tt_local_changes));

	/* if we have too many changes for one packet don't send any
	 * and wait for the tt table request which will be fragmented */
	if (new_len > hard_iface->soft_iface->mtu)
		new_len = BAT_PACKET_LEN;

	realloc_packet_buffer(hard_iface, new_len);
	batman_packet = (struct batman_packet *)hard_iface->packet_buff;

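	/* the CRC of the local tt table is carried in every OGM so that
	 * receivers can detect when their tt view of us is out of sync */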
	atomic_set(&bat_priv->tt_crc, tt_local_crc(bat_priv));

	/* reset the sending counter */
	atomic_set(&bat_priv->tt_ogm_append_cnt, TT_OGM_APPEND_MAX);

	batman_packet->tt_num_changes = tt_changes_fill_buffer(bat_priv,
				hard_iface->packet_buff + BAT_PACKET_LEN,
				hard_iface->packet_len - BAT_PACKET_LEN);
}

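/* shrink the OGM buffer back to a bare batman_packet with no tt changes
 * appended */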
static void reset_packet_buffer(struct bat_priv *bat_priv,
				struct hard_iface *hard_iface)
{
	struct batman_packet *batman_packet;

	realloc_packet_buffer(hard_iface, BAT_PACKET_LEN);

	batman_packet = (struct batman_packet *)hard_iface->packet_buff;
	batman_packet->tt_num_changes = 0;
}

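/* prepare the next own OGM for hard_iface (sequence number, ttvn, vis and
 * gateway flags) and queue it with a jittered send time */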
void schedule_own_packet(struct hard_iface *hard_iface)
{
	struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
	struct hard_iface *primary_if;
	unsigned long send_time;
	struct batman_packet *batman_packet;
	int vis_server;

	if ((hard_iface->if_status == IF_NOT_IN_USE) ||
	    (hard_iface->if_status == IF_TO_BE_REMOVED))
		return;

	vis_server = atomic_read(&bat_priv->vis_mode);
	primary_if = primary_if_get_selected(bat_priv);

	/**
	 * the interface gets activated here to avoid race conditions between
	 * the moment of activating the interface in
	 * hardif_activate_interface() (where the originator mac is set) and
	 * outdated packets (especially with uninitialized mac addresses)
	 * still sitting in the packet queue
	 */
	if (hard_iface->if_status == IF_TO_BE_ACTIVATED)
		hard_iface->if_status = IF_ACTIVE;

	if (hard_iface == primary_if) {
		/* if at least one change happened */
		if (atomic_read(&bat_priv->tt_local_changes) > 0) {
			prepare_packet_buffer(bat_priv, hard_iface);
			/* Increment the TTVN only once per OGM interval */
			atomic_inc(&bat_priv->ttvn);
			bat_priv->tt_poss_change = false;
		}

		/* if the changes have been sent enough times */
		if (!atomic_dec_not_zero(&bat_priv->tt_ogm_append_cnt))
			reset_packet_buffer(bat_priv, hard_iface);
	}

	/**
	 * NOTE: packet_buff might just have been re-allocated in
	 * prepare_packet_buffer() or in reset_packet_buffer()
	 */
	batman_packet = (struct batman_packet *)hard_iface->packet_buff;

	/* change sequence number to network order */
	batman_packet->seqno =
		htonl((uint32_t)atomic_read(&hard_iface->seqno));

	batman_packet->ttvn = atomic_read(&bat_priv->ttvn);
	batman_packet->tt_crc = htons((uint16_t)atomic_read(&bat_priv->tt_crc));

	if (vis_server == VIS_TYPE_SERVER_SYNC)
		batman_packet->flags |= VIS_SERVER;
	else
		batman_packet->flags &= ~VIS_SERVER;

	if ((hard_iface == primary_if) &&
	    (atomic_read(&bat_priv->gw_mode) == GW_MODE_SERVER))
		batman_packet->gw_flags =
			(uint8_t)atomic_read(&bat_priv->gw_bandwidth);
	else
		batman_packet->gw_flags = NO_FLAGS;

	atomic_inc(&hard_iface->seqno);

	slide_own_bcast_window(hard_iface);
	send_time = own_send_time(bat_priv);
	add_bat_packet_to_list(bat_priv,
			       hard_iface->packet_buff,
			       hard_iface->packet_len,
			       hard_iface, 1, send_time);

	if (primary_if)
		hardif_free_ref(primary_if);
}

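/* queue a received OGM for rebroadcast: decrease its ttl, rebroadcast the
 * tq of our best route towards the originator and apply the hop penalty */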
void schedule_forward_packet(struct orig_node *orig_node,
			     const struct ethhdr *ethhdr,
			     struct batman_packet *batman_packet,
			     int directlink,
			     struct hard_iface *if_incoming)
{
	struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
	struct neigh_node *router;
	uint8_t in_tq, in_ttl, tq_avg = 0;
	unsigned long send_time;
	uint8_t tt_num_changes;

	if (batman_packet->ttl <= 1) {
		bat_dbg(DBG_BATMAN, bat_priv, "ttl exceeded\n");
		return;
	}

	router = orig_node_get_router(orig_node);

	in_tq = batman_packet->tq;
	in_ttl = batman_packet->ttl;
	tt_num_changes = batman_packet->tt_num_changes;

	batman_packet->ttl--;
	memcpy(batman_packet->prev_sender, ethhdr->h_source, ETH_ALEN);

	/* rebroadcast tq of our best ranking neighbor to ensure the rebroadcast
	 * of our best tq value */
	if (router && router->tq_avg != 0) {

		/* rebroadcast ogm of best ranking neighbor as is */
		if (!compare_eth(router->addr, ethhdr->h_source)) {
			batman_packet->tq = router->tq_avg;

			if (router->last_ttl)
				batman_packet->ttl = router->last_ttl - 1;
		}

		tq_avg = router->tq_avg;
	}

	if (router)
		neigh_node_free_ref(router);

	/* apply hop penalty */
	batman_packet->tq = hop_penalty(batman_packet->tq, bat_priv);

	bat_dbg(DBG_BATMAN, bat_priv,
		"Forwarding packet: tq_orig: %i, tq_avg: %i, "
		"tq_forw: %i, ttl_orig: %i, ttl_forw: %i\n",
		in_tq, tq_avg, batman_packet->tq, in_ttl - 1,
		batman_packet->ttl);

	batman_packet->seqno = htonl(batman_packet->seqno);
	batman_packet->tt_crc = htons(batman_packet->tt_crc);

	/* switch off the primary's first hop flag when forwarding */
	batman_packet->flags &= ~PRIMARIES_FIRST_HOP;
	if (directlink)
		batman_packet->flags |= DIRECTLINK;
	else
		batman_packet->flags &= ~DIRECTLINK;

	send_time = forward_send_time();
	add_bat_packet_to_list(bat_priv,
			       (unsigned char *)batman_packet,
			       sizeof(*batman_packet) + tt_len(tt_num_changes),
			       if_incoming, 0, send_time);
}

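/* free a forw_packet and drop the references it holds */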
static void forw_packet_free(struct forw_packet *forw_packet)
{
	if (forw_packet->skb)
		kfree_skb(forw_packet->skb);
	if (forw_packet->if_incoming)
		hardif_free_ref(forw_packet->if_incoming);
	kfree(forw_packet);
}

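/* enqueue an already prepared forw_packet on the broadcast list and arm
 * its delayed work; the caller is responsible for the bcast_queue_left
 * accounting */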
static void _add_bcast_packet_to_list(struct bat_priv *bat_priv,
				      struct forw_packet *forw_packet,
				      unsigned long send_time)
{
	INIT_HLIST_NODE(&forw_packet->list);

	/* add new packet to packet list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_add_head(&forw_packet->list, &bat_priv->forw_bcast_list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* start timer for this packet */
	INIT_DELAYED_WORK(&forw_packet->delayed_work,
			  send_outstanding_bcast_packet);
	queue_delayed_work(bat_event_workqueue, &forw_packet->delayed_work,
			   send_time);
}

/* add a broadcast packet to the queue and setup timers. broadcast packets
 * are sent multiple times to increase the probability of being received.
 *
 * This function returns NETDEV_TX_OK on success and NETDEV_TX_BUSY on
 * errors.
 *
 * The skb is not consumed, so the caller should make sure that the
 * skb is freed. */
int add_bcast_packet_to_list(struct bat_priv *bat_priv,
			     const struct sk_buff *skb, unsigned long delay)
{
	struct hard_iface *primary_if = NULL;
	struct forw_packet *forw_packet;
	struct bcast_packet *bcast_packet;
	struct sk_buff *newskb;

	if (!atomic_dec_not_zero(&bat_priv->bcast_queue_left)) {
		bat_dbg(DBG_BATMAN, bat_priv, "bcast packet queue full\n");
		goto out;
	}

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out_and_inc;

	forw_packet = kmalloc(sizeof(*forw_packet), GFP_ATOMIC);

	if (!forw_packet)
		goto out_and_inc;

	newskb = skb_copy(skb, GFP_ATOMIC);
	if (!newskb)
		goto packet_free;

	/* as we have a copy now, it is safe to decrease the TTL */
	bcast_packet = (struct bcast_packet *)newskb->data;
	bcast_packet->ttl--;

	skb_reset_mac_header(newskb);

	forw_packet->skb = newskb;
	forw_packet->if_incoming = primary_if;

	/* how often did we send the bcast packet ? */
	forw_packet->num_packets = 0;

	_add_bcast_packet_to_list(bat_priv, forw_packet, delay);
	return NETDEV_TX_OK;

packet_free:
	kfree(forw_packet);
out_and_inc:
	atomic_inc(&bat_priv->bcast_queue_left);
out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return NETDEV_TX_BUSY;
}

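/* delayed work callback: rebroadcast the queued packet on every interface
 * belonging to this mesh and re-arm itself until the packet has been sent
 * three times */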
static void send_outstanding_bcast_packet(struct work_struct *work)
{
	struct hard_iface *hard_iface;
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct sk_buff *skb1;
	struct net_device *soft_iface = forw_packet->if_incoming->soft_iface;
	struct bat_priv *bat_priv = netdev_priv(soft_iface);

	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	/* rebroadcast packet */
	rcu_read_lock();
	list_for_each_entry_rcu(hard_iface, &hardif_list, list) {
		if (hard_iface->soft_iface != soft_iface)
			continue;

		/* send a copy of the saved skb */
		skb1 = skb_clone(forw_packet->skb, GFP_ATOMIC);
		if (skb1)
			send_skb_packet(skb1, hard_iface, broadcast_addr);
	}
	rcu_read_unlock();

	forw_packet->num_packets++;

	/* if we still have some more bcasts to send */
	if (forw_packet->num_packets < 3) {
		_add_bcast_packet_to_list(bat_priv, forw_packet,
					  ((5 * HZ) / 1000));
		return;
	}

out:
	forw_packet_free(forw_packet);
	atomic_inc(&bat_priv->bcast_queue_left);
}

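/* delayed work callback: take the queued OGM off the list, send it and,
 * for our own OGMs, schedule the next one right away */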
void send_outstanding_bat_packet(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct forw_packet *forw_packet =
		container_of(delayed_work, struct forw_packet, delayed_work);
	struct bat_priv *bat_priv;

	bat_priv = netdev_priv(forw_packet->if_incoming->soft_iface);
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_del(&forw_packet->list);
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);

	if (atomic_read(&bat_priv->mesh_state) == MESH_DEACTIVATING)
		goto out;

	send_packet(forw_packet);

	/**
	 * we have to keep at least one packet in the queue
	 * to determine the queue's wake up time unless we are
	 * shutting down
	 */
	if (forw_packet->own)
		schedule_own_packet(forw_packet->if_incoming);

out:
	/* don't count own packet */
	if (!forw_packet->own)
		atomic_inc(&bat_priv->batman_queue_left);

	forw_packet_free(forw_packet);
}

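/* cancel all scheduled broadcast and batman packets; if hard_iface is
 * given, only packets belonging to that interface are purged */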
void purge_outstanding_packets(struct bat_priv *bat_priv,
			       const struct hard_iface *hard_iface)
{
	struct forw_packet *forw_packet;
	struct hlist_node *tmp_node, *safe_tmp_node;
	bool pending;

	if (hard_iface)
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets(): %s\n",
			hard_iface->net_dev->name);
	else
		bat_dbg(DBG_BATMAN, bat_priv,
			"purge_outstanding_packets()\n");

	/* free bcast list */
	spin_lock_bh(&bat_priv->forw_bcast_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bcast_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

		/**
		 * send_outstanding_bcast_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bcast_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bcast_list_lock);

	/* free batman packet list */
	spin_lock_bh(&bat_priv->forw_bat_list_lock);
	hlist_for_each_entry_safe(forw_packet, tmp_node, safe_tmp_node,
				  &bat_priv->forw_bat_list, list) {

		/**
		 * if purge_outstanding_packets() was called with an argument
		 * we delete only packets belonging to the given interface
		 */
		if ((hard_iface) &&
		    (forw_packet->if_incoming != hard_iface))
			continue;

		spin_unlock_bh(&bat_priv->forw_bat_list_lock);

		/**
		 * send_outstanding_bat_packet() will lock the list to
		 * delete the item from the list
		 */
		pending = cancel_delayed_work_sync(&forw_packet->delayed_work);
		spin_lock_bh(&bat_priv->forw_bat_list_lock);

		if (pending) {
			hlist_del(&forw_packet->list);
			forw_packet_free(forw_packet);
		}
	}
	spin_unlock_bh(&bat_priv->forw_bat_list_lock);
}