/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"

#include <linux/pkt_sched.h>

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
        [TIPC_NLA_LINK_UNSPEC]          = { .type = NLA_UNSPEC },
        [TIPC_NLA_LINK_NAME] = {
                .type = NLA_STRING,
                .len = TIPC_MAX_LINK_NAME
        },
        [TIPC_NLA_LINK_MTU]             = { .type = NLA_U32 },
        [TIPC_NLA_LINK_BROADCAST]       = { .type = NLA_FLAG },
        [TIPC_NLA_LINK_UP]              = { .type = NLA_FLAG },
        [TIPC_NLA_LINK_ACTIVE]          = { .type = NLA_FLAG },
        [TIPC_NLA_LINK_PROP]            = { .type = NLA_NESTED },
        [TIPC_NLA_LINK_STATS]           = { .type = NLA_NESTED },
        [TIPC_NLA_LINK_RX]              = { .type = NLA_U32 },
        [TIPC_NLA_LINK_TX]              = { .type = NLA_U32 }
};

/* Properties valid for media, bearer and link */
static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
        [TIPC_NLA_PROP_UNSPEC]          = { .type = NLA_UNSPEC },
        [TIPC_NLA_PROP_PRIO]            = { .type = NLA_U32 },
        [TIPC_NLA_PROP_TOL]             = { .type = NLA_U32 },
        [TIPC_NLA_PROP_WIN]             = { .type = NLA_U32 }
};

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
/*
 * Out-of-range value for link session numbers
 */
#define WILDCARD_SESSION 0x10000

/* Link FSM states:
 */
enum {
        LINK_ESTABLISHED  = 0xe,
        LINK_ESTABLISHING = 0xe << 4,
        LINK_RESET        = 0x1 << 8,
        LINK_RESETTING    = 0x2 << 12,
        LINK_PEER_RESET   = 0xd << 16,
        LINK_FAILINGOVER  = 0xf << 20,
        LINK_SYNCHING     = 0xc << 24
};
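
/* Each state value above occupies its own hex digit, so a set of states can
 * be combined into a mask and tested with a single bitwise AND; e.g.
 * link_is_up() below reduces to
 * "l->state & (LINK_ESTABLISHED | LINK_SYNCHING)" instead of two compares.
 */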

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
        return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      u16 rcvgap, int tolerance, int priority,
                                      struct sk_buff_head *xmitq);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);

/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
        return link_is_up(l);
}

bool tipc_link_is_reset(struct tipc_link *l)
{
        return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
        return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
        return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
        return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
        return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

int tipc_link_is_active(struct tipc_link *l)
{
        struct tipc_node *n = l->owner;

        return (node_active_link(n, 0) == l) || (node_active_link(n, 1) == l);
}

static u32 link_own_addr(struct tipc_link *l)
{
        return msg_prevnode(l->pmsg);
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @b: pointer to associated bearer
 * @session: session number to be advertised in the link's protocol messages
 * @ownnode: identity of own node
 * @peer: identity of peer node
 * @maddr: media address to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct tipc_node *n, struct tipc_bearer *b, u32 session,
                      u32 ownnode, u32 peer, struct tipc_media_addr *maddr,
                      struct sk_buff_head *inputq, struct sk_buff_head *namedq,
                      struct tipc_link **link)
{
        struct tipc_link *l;
        struct tipc_msg *hdr;
        char *if_name;

        l = kzalloc(sizeof(*l), GFP_ATOMIC);
        if (!l)
                return false;
        *link = l;

        /* Note: peer i/f name is completed by reset/activate message */
        if_name = strchr(b->name, ':') + 1;
        sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
                tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
                if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));

        l->addr = peer;
        l->media_addr = maddr;
        l->owner = n;
        l->peer_session = WILDCARD_SESSION;
        l->bearer_id = b->identity;
        l->tolerance = b->tolerance;
        l->net_plane = b->net_plane;
        l->advertised_mtu = b->mtu;
        l->mtu = b->mtu;
        l->priority = b->priority;
        tipc_link_set_queue_limits(l, b->window);
        l->inputq = inputq;
        l->namedq = namedq;
        l->state = LINK_RESETTING;
        l->pmsg = (struct tipc_msg *)&l->proto_msg;
        hdr = l->pmsg;
        tipc_msg_init(ownnode, hdr, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, peer);
        msg_set_size(hdr, sizeof(l->proto_msg));
        msg_set_session(hdr, session);
        msg_set_bearer_id(hdr, l->bearer_id);
        strcpy((char *)msg_data(hdr), if_name);
        __skb_queue_head_init(&l->transmq);
        __skb_queue_head_init(&l->backlogq);
        __skb_queue_head_init(&l->deferdq);
        skb_queue_head_init(&l->wakeupq);
        skb_queue_head_init(l->inputq);
        return true;
}
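
/* Usage sketch (illustrative only; the actual caller lives in the node layer
 * and its argument names may differ):
 *
 *      struct tipc_link *l;
 *
 *      if (!tipc_link_create(n, b, session, ownnode, peer, maddr,
 *                            inputq, namedq, &l))
 *              return false;
 *
 * The new link starts out in LINK_RESETTING and is driven from here on by
 * tipc_link_fsm_evt(), tipc_link_timeout() and tipc_link_rcv().
 */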

/* tipc_link_build_bcast_sync_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
void tipc_link_build_bcast_sync_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq)
{
        struct sk_buff *skb;
        struct sk_buff_head list;
        u16 last_sent;

        skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
                              0, l->addr, link_own_addr(l), 0, 0, 0);
        if (!skb)
                return;
        last_sent = tipc_bclink_get_last_sent(l->owner->net);
        msg_set_last_bcast(buf_msg(skb), last_sent);
        __skb_queue_head_init(&list);
        __skb_queue_tail(&list, skb);
        tipc_link_xmit(l, &list, xmitq);
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
        int rc = 0;

        switch (l->state) {
        case LINK_RESETTING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_RESET:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                case LINK_FAILURE_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_PEER_RESET:
                switch (evt) {
                case LINK_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_FAILINGOVER:
                switch (evt) {
                case LINK_FAILOVER_END_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHING:
                switch (evt) {
                case LINK_ESTABLISH_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_PEER_RESET_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHED:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                        l->state = LINK_SYNCHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_SYNCHING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        default:
                pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
        }
        return rc;
illegal_evt:
        pr_err("Illegal FSM event %x in state %x on link %s\n",
               evt, l->state, l->name);
        return rc;
}
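
/* The return value of tipc_link_fsm_evt() is a bit field: most transitions
 * return 0, while the failure transitions out of LINK_ESTABLISHED and
 * LINK_SYNCHING OR in TIPC_LINK_DOWN_EVT. Caller sketch (illustrative only,
 * not the actual node-layer code):
 *
 *      rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
 *      if (rc & TIPC_LINK_DOWN_EVT)
 *              tipc_link_reset(l);
 */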

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
        struct sk_buff *skb;
        struct tipc_msg *msg;
        int length;

        /* Update counters used in statistical profiling of send traffic */
        l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
        l->stats.queue_sz_counts++;

        skb = skb_peek(&l->transmq);
        if (!skb)
                return;
        msg = buf_msg(skb);
        length = msg_size(msg);

        if (msg_user(msg) == MSG_FRAGMENTER) {
                if (msg_type(msg) != FIRST_FRAGMENT)
                        return;
                length = msg_size(msg_get_wrapped(msg));
        }
        l->stats.msg_lengths_total += length;
        l->stats.msg_length_counts++;
        if (length <= 64)
                l->stats.msg_length_profile[0]++;
        else if (length <= 256)
                l->stats.msg_length_profile[1]++;
        else if (length <= 1024)
                l->stats.msg_length_profile[2]++;
        else if (length <= 4096)
                l->stats.msg_length_profile[3]++;
        else if (length <= 16384)
                l->stats.msg_length_profile[4]++;
        else if (length <= 32768)
                l->stats.msg_length_profile[5]++;
        else
                l->stats.msg_length_profile[6]++;
}
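
/* The buckets filled above form a seven-slot message length histogram with
 * boundaries at 64, 256, 1024, 4096, 16384 and 32768 bytes (plus one slot
 * for anything larger); the msg_length_profile[] counters are kept in
 * l->stats alongside the other link counters.
 */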

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        int rc = 0;
        int mtyp = STATE_MSG;
        bool xmit = false;
        bool prb = false;

        link_profile_stats(l);

        switch (l->state) {
        case LINK_ESTABLISHED:
        case LINK_SYNCHING:
                if (!l->silent_intv_cnt) {
                        if (tipc_bclink_acks_missing(l->owner))
                                xmit = true;
                } else if (l->silent_intv_cnt <= l->abort_limit) {
                        xmit = true;
                        prb = true;
                } else {
                        rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
                l->silent_intv_cnt++;
                break;
        case LINK_RESET:
                xmit = true;
                mtyp = RESET_MSG;
                break;
        case LINK_ESTABLISHING:
                xmit = true;
                mtyp = ACTIVATE_MSG;
                break;
        case LINK_PEER_RESET:
        case LINK_RESETTING:
        case LINK_FAILINGOVER:
                break;
        default:
                break;
        }

        if (xmit)
                tipc_link_build_proto_msg(l, mtyp, prb, 0, 0, 0, xmitq);

        return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: buffer chain of the message whose transmission was attempted
 * Create a pseudo message to send back to the user when congestion abates.
 * Does not consume the buffer list.
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
        struct tipc_msg *msg = buf_msg(skb_peek(list));
        int imp = msg_importance(msg);
        u32 oport = msg_origport(msg);
        u32 addr = link_own_addr(link);
        struct sk_buff *skb;

        /* This really cannot happen... */
        if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
                pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
                return -ENOBUFS;
        }
        /* Non-blocking sender: */
        if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
                return -ELINKCONG;

        /* Create and schedule wakeup pseudo message */
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                              addr, addr, oport, 0, 0);
        if (!skb)
                return -ENOBUFS;
        TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
        TIPC_SKB_CB(skb)->chain_imp = imp;
        skb_queue_tail(&link->wakeupq, skb);
        link->stats.link_congs++;
        return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
        int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
        int imp, lim;
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
                imp = TIPC_SKB_CB(skb)->chain_imp;
                lim = l->window + l->backlog[imp].limit;
                pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
                if ((pnd[imp] + l->backlog[imp].len) >= lim)
                        break;
                skb_unlink(skb, &l->wakeupq);
                skb_queue_tail(l->inputq, skb);
        }
}

/**
 * tipc_link_reset_fragments - purge link's inbound message fragments queue
 * @l_ptr: pointer to link
 */
void tipc_link_reset_fragments(struct tipc_link *l_ptr)
{
        kfree_skb(l_ptr->reasm_buf);
        l_ptr->reasm_buf = NULL;
}

void tipc_link_purge_backlog(struct tipc_link *l)
{
        __skb_queue_purge(&l->backlogq);
        l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
        l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
        l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
        l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
        l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
}

/**
 * tipc_link_purge_queues - purge all pkt queues associated with link
 * @l_ptr: pointer to link
 */
void tipc_link_purge_queues(struct tipc_link *l_ptr)
{
        __skb_queue_purge(&l_ptr->deferdq);
        __skb_queue_purge(&l_ptr->transmq);
        tipc_link_purge_backlog(l_ptr);
        tipc_link_reset_fragments(l_ptr);
}

void tipc_link_reset(struct tipc_link *l)
{
        tipc_link_fsm_evt(l, LINK_RESET_EVT);

        /* Link is down, accept any session */
        l->peer_session = WILDCARD_SESSION;

        /* If peer is up, it only accepts an incremented session number */
        msg_set_session(l->pmsg, msg_session(l->pmsg) + 1);

        /* Prepare for renewed mtu size negotiation */
        l->mtu = l->advertised_mtu;

        /* Clean up all queues: */
        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
        skb_queue_splice_init(&l->wakeupq, l->inputq);

        tipc_link_purge_backlog(l);
        kfree_skb(l->reasm_buf);
        kfree_skb(l->failover_reasm_skb);
        l->reasm_buf = NULL;
        l->failover_reasm_skb = NULL;
        l->rcv_unacked = 0;
        l->snd_nxt = 1;
        l->rcv_nxt = 1;
        l->silent_intv_cnt = 0;
        l->stats.recv_info = 0;
        l->stale_count = 0;
        link_reset_statistics(l);
}
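
/* After tipc_link_reset() the link keeps its identity and peer, but restarts
 * the packet sequence space (snd_nxt = rcv_nxt = 1) under an incremented
 * session number, so the peer can distinguish the new incarnation from
 * delayed packets belonging to the old one.
 */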

/**
 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
 * @link: link to use
 * @list: chain of buffers containing message
 *
 * Consumes the buffer chain, except when returning an error code,
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int __tipc_link_xmit(struct net *net, struct tipc_link *link,
                     struct sk_buff_head *list)
{
        struct tipc_msg *msg = buf_msg(skb_peek(list));
        unsigned int maxwin = link->window;
        unsigned int i, imp = msg_importance(msg);
        uint mtu = link->mtu;
        u16 ack = mod(link->rcv_nxt - 1);
        u16 seqno = link->snd_nxt;
        u16 bc_last_in = link->owner->bclink.last_in;
        struct tipc_media_addr *addr = link->media_addr;
        struct sk_buff_head *transmq = &link->transmq;
        struct sk_buff_head *backlogq = &link->backlogq;
        struct sk_buff *skb, *bskb;

        /* Match msg importance against this and all higher backlog limits: */
        for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
                if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
                        return link_schedule_user(link, list);
        }
        if (unlikely(msg_size(msg) > mtu))
                return -EMSGSIZE;

        /* Prepare each packet for sending, and add to relevant queue: */
        while (skb_queue_len(list)) {
                skb = skb_peek(list);
                msg = buf_msg(skb);
                msg_set_seqno(msg, seqno);
                msg_set_ack(msg, ack);
                msg_set_bcast_ack(msg, bc_last_in);

                if (likely(skb_queue_len(transmq) < maxwin)) {
                        __skb_dequeue(list);
                        __skb_queue_tail(transmq, skb);
                        tipc_bearer_send(net, link->bearer_id, skb, addr);
                        link->rcv_unacked = 0;
                        seqno++;
                        continue;
                }
                if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
                        kfree_skb(__skb_dequeue(list));
                        link->stats.sent_bundled++;
                        continue;
                }
                if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
                        kfree_skb(__skb_dequeue(list));
                        __skb_queue_tail(backlogq, bskb);
                        link->backlog[msg_importance(buf_msg(bskb))].len++;
                        link->stats.sent_bundled++;
                        link->stats.sent_bundles++;
                        continue;
                }
                link->backlog[imp].len += skb_queue_len(list);
                skb_queue_splice_tail_init(list, backlogq);
        }
        link->snd_nxt = seqno;
        return 0;
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb_peek(list));
        unsigned int maxwin = l->window;
        unsigned int i, imp = msg_importance(hdr);
        unsigned int mtu = l->mtu;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
        u16 bc_last_in = l->owner->bclink.last_in;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff_head *backlogq = &l->backlogq;
        struct sk_buff *skb, *_skb, *bskb;

        /* Match msg importance against this and all higher backlog limits: */
        for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
                if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
                        return link_schedule_user(l, list);
        }
        if (unlikely(msg_size(hdr) > mtu))
                return -EMSGSIZE;

        /* Prepare each packet for sending, and add to relevant queue: */
        while (skb_queue_len(list)) {
                skb = skb_peek(list);
                hdr = buf_msg(skb);
                msg_set_seqno(hdr, seqno);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_last_in);

                if (likely(skb_queue_len(transmq) < maxwin)) {
                        _skb = skb_clone(skb, GFP_ATOMIC);
                        if (!_skb)
                                return -ENOBUFS;
                        __skb_dequeue(list);
                        __skb_queue_tail(transmq, skb);
                        __skb_queue_tail(xmitq, _skb);
                        l->rcv_unacked = 0;
                        seqno++;
                        continue;
                }
                if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
                        kfree_skb(__skb_dequeue(list));
                        l->stats.sent_bundled++;
                        continue;
                }
                if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
                        kfree_skb(__skb_dequeue(list));
                        __skb_queue_tail(backlogq, bskb);
                        l->backlog[msg_importance(buf_msg(bskb))].len++;
                        l->stats.sent_bundled++;
                        l->stats.sent_bundles++;
                        continue;
                }
                l->backlog[imp].len += skb_queue_len(list);
                skb_queue_splice_tail_init(list, backlogq);
        }
        l->snd_nxt = seqno;
        return 0;
}
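
/* Caller-side sketch for the xmitq-based variant (illustrative only): unlike
 * __tipc_link_xmit() above, tipc_link_xmit() never touches the bearer itself;
 * the caller collects the cloned packets in @xmitq and flushes them
 * afterwards, e.g.:
 *
 *      struct sk_buff_head xmitq;
 *
 *      __skb_queue_head_init(&xmitq);
 *      rc = tipc_link_xmit(l, &list, &xmitq);
 *      while ((skb = __skb_dequeue(&xmitq)))
 *              tipc_bearer_send(net, bearer_id, skb, maddr);
 */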

/*
 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
 * Receive the sequence number where we should start receiving and
 * acking broadcast packets from a newly added peer node, and open
 * up for reception of such packets.
 *
 * Called with node locked
 */
static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
        struct tipc_msg *msg = buf_msg(buf);

        n->bclink.last_sent = n->bclink.last_in = msg_last_bcast(msg);
        n->bclink.recv_permitted = true;
        kfree_skb(buf);
}

/*
 * tipc_link_push_packets - push unsent packets to bearer
 *
 * Push out the unsent messages of a link where congestion
 * has abated. Node is locked.
 *
 * Called with node locked
 */
void tipc_link_push_packets(struct tipc_link *link)
{
        struct sk_buff *skb;
        struct tipc_msg *msg;
        u16 seqno = link->snd_nxt;
        u16 ack = mod(link->rcv_nxt - 1);

        while (skb_queue_len(&link->transmq) < link->window) {
                skb = __skb_dequeue(&link->backlogq);
                if (!skb)
                        break;
                msg = buf_msg(skb);
                link->backlog[msg_importance(msg)].len--;
                msg_set_ack(msg, ack);
                msg_set_seqno(msg, seqno);
                seqno = mod(seqno + 1);
                msg_set_bcast_ack(msg, link->owner->bclink.last_in);
                link->rcv_unacked = 0;
                __skb_queue_tail(&link->transmq, skb);
                tipc_bearer_send(link->owner->net, link->bearer_id,
                                 skb, link->media_addr);
        }
        link->snd_nxt = seqno;
}

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        struct sk_buff *skb, *_skb;
        struct tipc_msg *hdr;
        u16 seqno = l->snd_nxt;
        u16 ack = l->rcv_nxt - 1;

        while (skb_queue_len(&l->transmq) < l->window) {
                skb = skb_peek(&l->backlogq);
                if (!skb)
                        break;
                _skb = skb_clone(skb, GFP_ATOMIC);
                if (!_skb)
                        break;
                __skb_dequeue(&l->backlogq);
                hdr = buf_msg(skb);
                l->backlog[msg_importance(hdr)].len--;
                __skb_queue_tail(&l->transmq, skb);
                __skb_queue_tail(xmitq, _skb);
                msg_set_ack(hdr, ack);
                msg_set_seqno(hdr, seqno);
                msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
                l->rcv_unacked = 0;
                seqno++;
        }
        l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l_ptr,
                                    struct sk_buff *buf)
{
        struct tipc_msg *msg = buf_msg(buf);
        struct net *net = l_ptr->owner->net;

        pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);

        if (l_ptr->addr) {
                /* Handle failure on standard link */
                link_print(l_ptr, "Resetting link ");
                pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
                        msg_user(msg), msg_type(msg), msg_size(msg),
                        msg_errcode(msg));
                pr_info("sqno %u, prev: %x, src: %x\n",
                        msg_seqno(msg), msg_prevnode(msg), msg_orignode(msg));
        } else {
                /* Handle failure on broadcast link */
                struct tipc_node *n_ptr;
                char addr_string[16];

                pr_info("Msg seq number: %u, ", msg_seqno(msg));
                pr_cont("Outstanding acks: %lu\n",
                        (unsigned long) TIPC_SKB_CB(buf)->handle);

                n_ptr = tipc_bclink_retransmit_to(net);

                tipc_addr_string_fill(addr_string, n_ptr->addr);
                pr_info("Broadcast link info for %s\n", addr_string);
                pr_info("Reception permitted: %d, Acked: %u\n",
                        n_ptr->bclink.recv_permitted,
                        n_ptr->bclink.acked);
                pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
                        n_ptr->bclink.last_in,
                        n_ptr->bclink.oos_state,
                        n_ptr->bclink.last_sent);

                n_ptr->action_flags |= TIPC_BCAST_RESET;
                l_ptr->stale_count = 0;
        }
}

void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
                          u32 retransmits)
{
        struct tipc_msg *msg;

        if (!skb)
                return;

        msg = buf_msg(skb);

        /* Detect repeated retransmit failures */
        if (l_ptr->last_retransm == msg_seqno(msg)) {
                if (++l_ptr->stale_count > 100) {
                        link_retransmit_failure(l_ptr, skb);
                        return;
                }
        } else {
                l_ptr->last_retransm = msg_seqno(msg);
                l_ptr->stale_count = 1;
        }

        skb_queue_walk_from(&l_ptr->transmq, skb) {
                if (!retransmits)
                        break;
                msg = buf_msg(skb);
                msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
                msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
                tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
                                 l_ptr->media_addr);
                retransmits--;
                l_ptr->stats.retransmitted++;
        }
}

static int tipc_link_retransm(struct tipc_link *l, int retransm,
                              struct sk_buff_head *xmitq)
{
        struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
        struct tipc_msg *hdr;

        if (!skb)
                return 0;

        /* Detect repeated retransmit failures on same packet */
        if (likely(l->last_retransm != buf_seqno(skb))) {
                l->last_retransm = buf_seqno(skb);
                l->stale_count = 1;
        } else if (++l->stale_count > 100) {
                link_retransmit_failure(l, skb);
                return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
        }
        skb_queue_walk(&l->transmq, skb) {
                if (!retransm)
                        return 0;
                hdr = buf_msg(skb);
                _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
                if (!_skb)
                        return 0;
                hdr = buf_msg(_skb);
                msg_set_ack(hdr, l->rcv_nxt - 1);
                msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
                _skb->priority = TC_PRIO_CONTROL;
                __skb_queue_tail(xmitq, _skb);
                retransm--;
                l->stats.retransmitted++;
        }
        return 0;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb,
                            struct sk_buff_head *inputq)
{
        struct tipc_node *node = link->owner;

        switch (msg_user(buf_msg(skb))) {
        case TIPC_LOW_IMPORTANCE:
        case TIPC_MEDIUM_IMPORTANCE:
        case TIPC_HIGH_IMPORTANCE:
        case TIPC_CRITICAL_IMPORTANCE:
        case CONN_MANAGER:
                skb_queue_tail(inputq, skb);
                return true;
        case NAME_DISTRIBUTOR:
                node->bclink.recv_permitted = true;
                skb_queue_tail(link->namedq, skb);
                return true;
        case MSG_BUNDLER:
        case TUNNEL_PROTOCOL:
        case MSG_FRAGMENTER:
        case BCAST_PROTOCOL:
                return false;
        default:
                pr_warn("Dropping received illegal msg type\n");
                kfree_skb(skb);
                return false;
        }
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
                           struct sk_buff_head *inputq)
{
        struct tipc_node *node = l->owner;
        struct tipc_msg *hdr = buf_msg(skb);
        struct sk_buff **reasm_skb = &l->reasm_buf;
        struct sk_buff *iskb;
        struct sk_buff_head tmpq;
        int usr = msg_user(hdr);
        int rc = 0;
        int pos = 0;
        int ipos = 0;

        if (unlikely(usr == TUNNEL_PROTOCOL)) {
                if (msg_type(hdr) == SYNCH_MSG) {
                        __skb_queue_purge(&l->deferdq);
                        goto drop;
                }
                if (!tipc_msg_extract(skb, &iskb, &ipos))
                        return rc;
                kfree_skb(skb);
                skb = iskb;
                hdr = buf_msg(skb);
                if (less(msg_seqno(hdr), l->drop_point))
                        goto drop;
                if (tipc_data_input(l, skb, inputq))
                        return rc;
                usr = msg_user(hdr);
                reasm_skb = &l->failover_reasm_skb;
        }

        if (usr == MSG_BUNDLER) {
                skb_queue_head_init(&tmpq);
                l->stats.recv_bundles++;
                l->stats.recv_bundled += msg_msgcnt(hdr);
                while (tipc_msg_extract(skb, &iskb, &pos))
                        tipc_data_input(l, iskb, &tmpq);
                tipc_skb_queue_splice_tail(&tmpq, inputq);
                return 0;
        } else if (usr == MSG_FRAGMENTER) {
                l->stats.recv_fragments++;
                if (tipc_buf_append(reasm_skb, &skb)) {
                        l->stats.recv_fragmented++;
                        tipc_data_input(l, skb, inputq);
                } else if (!*reasm_skb) {
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
                return 0;
        } else if (usr == BCAST_PROTOCOL) {
                tipc_link_sync_rcv(node, skb);
                return 0;
        }
drop:
        kfree_skb(skb);
        return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
        bool released = false;
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&l->transmq, skb, tmp) {
                if (more(buf_seqno(skb), acked))
                        break;
                __skb_unlink(skb, &l->transmq);
                kfree_skb(skb);
                released = true;
        }
        return released;
}

/* tipc_link_build_ack_msg: prepare link acknowledge message for transmission
 */
void tipc_link_build_ack_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        l->rcv_unacked = 0;
        l->stats.sent_acks++;
        tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 */
static void tipc_link_build_nack_msg(struct tipc_link *l,
                                     struct sk_buff_head *xmitq)
{
        u32 def_cnt = ++l->stats.deferred_recv;

        if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
                tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
}
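
/* Rationale: a NACK is sent immediately for the first out-of-order packet
 * (deferred queue length == 1) and after that only every TIPC_NACK_INTV
 * deferred packets, so a long sequence gap does not trigger one NACK per
 * arriving packet.
 */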

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                  struct sk_buff_head *xmitq)
{
        struct sk_buff_head *defq = &l->deferdq;
        struct tipc_msg *hdr;
        u16 seqno, rcv_nxt, win_lim;
        int rc = 0;

        do {
                hdr = buf_msg(skb);
                seqno = msg_seqno(hdr);
                rcv_nxt = l->rcv_nxt;
                win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

                /* Verify and update link state */
                if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
                        return tipc_link_proto_rcv(l, skb, xmitq);

                if (unlikely(!link_is_up(l))) {
                        if (l->state == LINK_ESTABLISHING)
                                rc = TIPC_LINK_UP_EVT;
                        goto drop;
                }

                /* Don't send probe at next timeout expiration */
                l->silent_intv_cnt = 0;

                /* Drop if outside receive window */
                if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
                        l->stats.duplicates++;
                        goto drop;
                }

                /* Forward queues and wake up waiting users */
                if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
                        tipc_link_advance_backlog(l, xmitq);
                        if (unlikely(!skb_queue_empty(&l->wakeupq)))
                                link_prepare_wakeup(l);
                }

                /* Defer delivery if sequence gap */
                if (unlikely(seqno != rcv_nxt)) {
                        __tipc_skb_queue_sorted(defq, seqno, skb);
                        tipc_link_build_nack_msg(l, xmitq);
                        break;
                }

                /* Deliver packet */
                l->rcv_nxt++;
                l->stats.recv_info++;
                if (!tipc_data_input(l, skb, l->inputq))
                        rc = tipc_link_input(l, skb, l->inputq);
                if (unlikely(rc))
                        break;
                if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
                        tipc_link_build_ack_msg(l, xmitq);

        } while ((skb = __skb_dequeue(defq)));

        return rc;
drop:
        kfree_skb(skb);
        return rc;
}
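
/* Note on the loop in tipc_link_rcv(): after each in-sequence delivery the
 * next candidate is pulled from the deferred queue, so a single call can
 * drain a previously buffered sequence gap as soon as the missing packet
 * arrives.
 */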
1145
Erik Hugne7ae934b2014-07-01 10:22:40 +02001146/**
Allan Stephens8809b252011-10-25 10:44:35 -04001147 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1148 *
1149 * Returns increase in queue length (i.e. 0 or 1)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001150 */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001151u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001152{
Ying Xuebc6fecd2014-11-26 11:41:53 +08001153 struct sk_buff *skb1;
Jon Paul Maloye4bf4f72015-05-14 10:46:14 -04001154 u16 seq_no = buf_seqno(skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001155
1156 /* Empty queue ? */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001157 if (skb_queue_empty(list)) {
1158 __skb_queue_tail(list, skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001159 return 1;
1160 }
1161
1162 /* Last ? */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001163 if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
1164 __skb_queue_tail(list, skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001165 return 1;
1166 }
1167
Allan Stephens8809b252011-10-25 10:44:35 -04001168 /* Locate insertion point in queue, then insert; discard if duplicate */
Ying Xuebc6fecd2014-11-26 11:41:53 +08001169 skb_queue_walk(list, skb1) {
Jon Paul Maloye4bf4f72015-05-14 10:46:14 -04001170 u16 curr_seqno = buf_seqno(skb1);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001171
Allan Stephens8809b252011-10-25 10:44:35 -04001172 if (seq_no == curr_seqno) {
Ying Xuebc6fecd2014-11-26 11:41:53 +08001173 kfree_skb(skb);
Allan Stephens8809b252011-10-25 10:44:35 -04001174 return 0;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001175 }
Allan Stephens8809b252011-10-25 10:44:35 -04001176
1177 if (less(seq_no, curr_seqno))
Per Lidenb97bf3f2006-01-02 19:04:38 +01001178 break;
Allan Stephens8809b252011-10-25 10:44:35 -04001179 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001180
Ying Xuebc6fecd2014-11-26 11:41:53 +08001181 __skb_queue_before(list, skb1, skb);
Allan Stephens8809b252011-10-25 10:44:35 -04001182 return 1;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001183}
1184
Allan Stephens8809b252011-10-25 10:44:35 -04001185/*
Per Lidenb97bf3f2006-01-02 19:04:38 +01001186 * Send protocol message to the other endpoint.
1187 */
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001188void tipc_link_proto_xmit(struct tipc_link *l, u32 msg_typ, int probe_msg,
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001189 u32 gap, u32 tolerance, u32 priority)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001190{
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001191 struct sk_buff *skb = NULL;
1192 struct sk_buff_head xmitq;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001193
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001194 __skb_queue_head_init(&xmitq);
1195 tipc_link_build_proto_msg(l, msg_typ, probe_msg, gap,
1196 tolerance, priority, &xmitq);
1197 skb = __skb_dequeue(&xmitq);
1198 if (!skb)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001199 return;
Jon Paul Maloy440d8962015-07-30 18:24:26 -04001200 tipc_bearer_send(l->owner->net, l->bearer_id, skb, l->media_addr);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001201 l->rcv_unacked = 0;
1202 kfree_skb(skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001203}
1204
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001205/* tipc_link_build_proto_msg: prepare link protocol message for transmission
1206 */
1207static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1208 u16 rcvgap, int tolerance, int priority,
1209 struct sk_buff_head *xmitq)
1210{
1211 struct sk_buff *skb = NULL;
1212 struct tipc_msg *hdr = l->pmsg;
1213 u16 snd_nxt = l->snd_nxt;
1214 u16 rcv_nxt = l->rcv_nxt;
1215 u16 rcv_last = rcv_nxt - 1;
1216 int node_up = l->owner->bclink.recv_permitted;
1217
1218 /* Don't send protocol message during reset or link failover */
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001219 if (tipc_link_is_blocked(l))
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001220 return;
1221
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001222 msg_set_type(hdr, mtyp);
1223 msg_set_net_plane(hdr, l->net_plane);
1224 msg_set_bcast_ack(hdr, l->owner->bclink.last_in);
1225 msg_set_last_bcast(hdr, tipc_bclink_get_last_sent(l->owner->net));
1226 msg_set_link_tolerance(hdr, tolerance);
1227 msg_set_linkprio(hdr, priority);
1228 msg_set_redundant_link(hdr, node_up);
1229 msg_set_seq_gap(hdr, 0);
1230
1231 /* Compatibility: created msg must not be in sequence with pkt flow */
1232 msg_set_seqno(hdr, snd_nxt + U16_MAX / 2);
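	/* Added note (illustrative): with snd_nxt == 100 the line above numbers
	 * the protocol message (100 + 32767) & 0xffff == 32867, i.e. half the
	 * 16-bit sequence space away from the data packet flow, so it cannot be
	 * mistaken for an in-sequence data packet.
	 */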
1233
1234 if (mtyp == STATE_MSG) {
1235 if (!tipc_link_is_up(l))
1236 return;
1237 msg_set_next_sent(hdr, snd_nxt);
1238
1239 /* Override rcvgap if there are packets in deferred queue */
1240 if (!skb_queue_empty(&l->deferdq))
1241 rcvgap = buf_seqno(skb_peek(&l->deferdq)) - rcv_nxt;
1242 if (rcvgap) {
1243 msg_set_seq_gap(hdr, rcvgap);
1244 l->stats.sent_nacks++;
1245 }
1246 msg_set_ack(hdr, rcv_last);
1247 msg_set_probe(hdr, probe);
1248 if (probe)
1249 l->stats.sent_probes++;
1250 l->stats.sent_states++;
1251 } else {
1252 /* RESET_MSG or ACTIVATE_MSG */
1253 msg_set_max_pkt(hdr, l->advertised_mtu);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001254 msg_set_ack(hdr, l->rcv_nxt - 1);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001255 msg_set_next_sent(hdr, 1);
1256 }
1257 skb = tipc_buf_acquire(msg_size(hdr));
1258 if (!skb)
1259 return;
1260 skb_copy_to_linear_data(skb, hdr, msg_size(hdr));
1261 skb->priority = TC_PRIO_CONTROL;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001262 __skb_queue_tail(xmitq, skb);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001263}
Per Lidenb97bf3f2006-01-02 19:04:38 +01001264
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001265/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001266 * with contents of the link's transmit and backlog queues.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001267 */
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001268void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1269 int mtyp, struct sk_buff_head *xmitq)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001270{
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001271 struct sk_buff *skb, *tnlskb;
1272 struct tipc_msg *hdr, tnlhdr;
1273 struct sk_buff_head *queue = &l->transmq;
1274 struct sk_buff_head tmpxq, tnlq;
1275 u16 pktlen, pktcnt, seqno = l->snd_nxt;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001276
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001277 if (!tnl)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001278 return;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001279
1280 skb_queue_head_init(&tnlq);
1281 skb_queue_head_init(&tmpxq);
1282
1283 /* At least one packet required for safe algorithm => add dummy */
1284 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1285 BASIC_H_SIZE, 0, l->addr, link_own_addr(l),
1286 0, 0, TIPC_ERR_NO_PORT);
Ying Xuea6ca1092014-11-26 11:41:55 +08001287 if (!skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001288 pr_warn("%sunable to create tunnel packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001289 return;
Allan Stephens5392d642006-06-25 23:52:50 -07001290 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001291 skb_queue_tail(&tnlq, skb);
1292 tipc_link_xmit(l, &tnlq, &tmpxq);
1293 __skb_queue_purge(&tmpxq);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001294
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001295 /* Initialize reusable tunnel packet header */
1296 tipc_msg_init(link_own_addr(l), &tnlhdr, TUNNEL_PROTOCOL,
1297 mtyp, INT_H_SIZE, l->addr);
1298 pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1299 msg_set_msgcnt(&tnlhdr, pktcnt);
1300 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1301tnl:
1302 /* Wrap each packet into a tunnel packet */
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001303 skb_queue_walk(queue, skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001304 hdr = buf_msg(skb);
1305 if (queue == &l->backlogq)
1306 msg_set_seqno(hdr, seqno++);
1307 pktlen = msg_size(hdr);
1308 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1309 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
1310 if (!tnlskb) {
1311 pr_warn("%sunable to send packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001312 return;
1313 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001314 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1315 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1316 __skb_queue_tail(&tnlq, tnlskb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001317 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001318 if (queue != &l->backlogq) {
1319 queue = &l->backlogq;
1320 goto tnl;
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -04001321 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001322
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001323 tipc_link_xmit(tnl, &tnlq, xmitq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001324
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001325 if (mtyp == FAILOVER_MSG) {
1326 tnl->drop_point = l->rcv_nxt;
1327 tnl->failover_reasm_skb = l->reasm_buf;
1328 l->reasm_buf = NULL;
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001329 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001330}
1331
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001332/* tipc_link_proto_rcv(): receive link level protocol message.
 1333 * Note that the network plane id propagates through the network, and may
 1334 * change at any time. The node with the lowest numerical id determines
 1335 * the network plane.
1336 */
1337static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1338 struct sk_buff_head *xmitq)
1339{
1340 struct tipc_msg *hdr = buf_msg(skb);
1341 u16 rcvgap = 0;
1342 u16 nacked_gap = msg_seq_gap(hdr);
1343 u16 peers_snd_nxt = msg_next_sent(hdr);
1344 u16 peers_tol = msg_link_tolerance(hdr);
1345 u16 peers_prio = msg_linkprio(hdr);
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001346 u16 rcv_nxt = l->rcv_nxt;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001347 int mtyp = msg_type(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001348 char *if_name;
1349 int rc = 0;
1350
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001351 if (tipc_link_is_blocked(l))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001352 goto exit;
1353
1354 if (link_own_addr(l) > msg_prevnode(hdr))
1355 l->net_plane = msg_net_plane(hdr);
1356
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001357 switch (mtyp) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001358 case RESET_MSG:
1359
1360 /* Ignore duplicate RESET with old session number */
1361 if ((less_eq(msg_session(hdr), l->peer_session)) &&
1362 (l->peer_session != WILDCARD_SESSION))
1363 break;
1364 /* fall thru' */
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001365
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001366 case ACTIVATE_MSG:
1367
1368 /* Complete own link name with peer's interface name */
1369 if_name = strrchr(l->name, ':') + 1;
1370 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1371 break;
1372 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1373 break;
1374 strncpy(if_name, msg_data(hdr), TIPC_MAX_IF_NAME);
1375
1376 /* Update own tolerance if peer indicates a non-zero value */
1377 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1378 l->tolerance = peers_tol;
1379
1380 /* Update own priority if peer's priority is higher */
1381 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1382 l->priority = peers_prio;
1383
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001384 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1385 if ((mtyp == RESET_MSG) || !link_is_up(l))
1386 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1387
1388 /* ACTIVATE_MSG takes up link if it was already locally reset */
1389 if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1390 rc = TIPC_LINK_UP_EVT;
1391
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001392 l->peer_session = msg_session(hdr);
1393 l->peer_bearer_id = msg_bearer_id(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001394 if (l->mtu > msg_max_pkt(hdr))
1395 l->mtu = msg_max_pkt(hdr);
1396 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001397
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001398 case STATE_MSG:
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001399
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001400 /* Update own tolerance if peer indicates a non-zero value */
1401 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1402 l->tolerance = peers_tol;
1403
1404 l->silent_intv_cnt = 0;
1405 l->stats.recv_states++;
1406 if (msg_probe(hdr))
1407 l->stats.recv_probes++;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001408
1409 if (!link_is_up(l)) {
1410 if (l->state == LINK_ESTABLISHING)
1411 rc = TIPC_LINK_UP_EVT;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001412 break;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001413 }
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001414
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001415 /* Send NACK if peer has sent pkts we haven't received yet */
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001416 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001417 rcvgap = peers_snd_nxt - l->rcv_nxt;
1418 if (rcvgap || (msg_probe(hdr)))
1419 tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
Jon Paul Maloy16040892015-07-21 06:42:28 -04001420 0, 0, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001421 tipc_link_release_pkts(l, msg_ack(hdr));
1422
1423 /* If NACK, retransmit will now start at right position */
1424 if (nacked_gap) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001425 rc = tipc_link_retransm(l, nacked_gap, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001426 l->stats.recv_nacks++;
1427 }
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001428
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001429 tipc_link_advance_backlog(l, xmitq);
1430 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1431 link_prepare_wakeup(l);
1432 }
1433exit:
1434 kfree_skb(skb);
1435 return rc;
1436}
1437
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04001438void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001439{
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001440 int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001441
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04001442 l->window = win;
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04001443 l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
1444 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
1445 l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
1446 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
1447 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001448}
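/* Worked example (added note, illustrative): with win == 50 the backlog
 * limits above become LOW == 25, MEDIUM == 50, HIGH == 75 and
 * CRITICAL == 100 packets, while SYSTEM importance is capped at max_bulk,
 * i.e. the number of MTU-sized packets needed to carry
 * TIPC_MAX_PUBLICATIONS name-table items of ITEM_SIZE bytes each.
 */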
1449
Jon Paul Maloye099e862014-02-13 17:29:18 -05001450/* tipc_link_find_owner - locate owner node of link by link's name
Ying Xuef2f98002015-01-09 15:27:05 +08001451 * @net: the applicable net namespace
Jon Paul Maloye099e862014-02-13 17:29:18 -05001452 * @name: pointer to link name string
1453 * @bearer_id: pointer to index in 'node->links' array where the link was found.
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001454 *
Jon Paul Maloye099e862014-02-13 17:29:18 -05001455 * Returns pointer to node owning the link, or NULL if no matching link is found.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001456 */
Ying Xuef2f98002015-01-09 15:27:05 +08001457static struct tipc_node *tipc_link_find_owner(struct net *net,
1458 const char *link_name,
Jon Paul Maloye099e862014-02-13 17:29:18 -05001459 unsigned int *bearer_id)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001460{
Ying Xuef2f98002015-01-09 15:27:05 +08001461 struct tipc_net *tn = net_generic(net, tipc_net_id);
Paul Gortmakera18c4bc2011-12-29 20:58:42 -05001462 struct tipc_link *l_ptr;
Erik Hugnebbfbe472013-10-18 07:23:21 +02001463 struct tipc_node *n_ptr;
Fabian Frederick886eaa12014-12-25 12:05:50 +01001464 struct tipc_node *found_node = NULL;
Erik Hugnebbfbe472013-10-18 07:23:21 +02001465 int i;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001466
Jon Paul Maloye099e862014-02-13 17:29:18 -05001467 *bearer_id = 0;
Ying Xue6c7a7622014-03-27 12:54:37 +08001468 rcu_read_lock();
Ying Xuef2f98002015-01-09 15:27:05 +08001469 list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
Jon Paul Maloya11607f2014-02-14 16:40:44 -05001470 tipc_node_lock(n_ptr);
Erik Hugnebbfbe472013-10-18 07:23:21 +02001471 for (i = 0; i < MAX_BEARERS; i++) {
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001472 l_ptr = n_ptr->links[i].link;
Jon Paul Maloye099e862014-02-13 17:29:18 -05001473 if (l_ptr && !strcmp(l_ptr->name, link_name)) {
1474 *bearer_id = i;
1475 found_node = n_ptr;
1476 break;
1477 }
Erik Hugnebbfbe472013-10-18 07:23:21 +02001478 }
Jon Paul Maloya11607f2014-02-14 16:40:44 -05001479 tipc_node_unlock(n_ptr);
Jon Paul Maloye099e862014-02-13 17:29:18 -05001480 if (found_node)
1481 break;
Erik Hugnebbfbe472013-10-18 07:23:21 +02001482 }
Ying Xue6c7a7622014-03-27 12:54:37 +08001483 rcu_read_unlock();
1484
Jon Paul Maloye099e862014-02-13 17:29:18 -05001485 return found_node;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001486}
1487
Allan Stephens5c216e12011-10-18 11:34:29 -04001488/**
Per Lidenb97bf3f2006-01-02 19:04:38 +01001489 * link_reset_statistics - reset link statistics
1490 * @l_ptr: pointer to link
1491 */
Paul Gortmakera18c4bc2011-12-29 20:58:42 -05001492static void link_reset_statistics(struct tipc_link *l_ptr)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001493{
1494 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001495 l_ptr->stats.sent_info = l_ptr->snd_nxt;
1496 l_ptr->stats.recv_info = l_ptr->rcv_nxt;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001497}
1498
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001499static void link_print(struct tipc_link *l, const char *str)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001500{
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001501 struct sk_buff *hskb = skb_peek(&l->transmq);
1502 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt;
1503 u16 tail = l->snd_nxt - 1;
Ying Xue7a2f7d12014-04-21 10:55:46 +08001504
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001505 pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04001506 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1507 skb_queue_len(&l->transmq), head, tail,
1508 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001509}
Richard Alpe0655f6a2014-11-20 10:29:07 +01001510
1511/* Parse and validate nested (link) properties valid for media, bearer and link
1512 */
1513int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1514{
1515 int err;
1516
1517 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1518 tipc_nl_prop_policy);
1519 if (err)
1520 return err;
1521
1522 if (props[TIPC_NLA_PROP_PRIO]) {
1523 u32 prio;
1524
1525 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1526 if (prio > TIPC_MAX_LINK_PRI)
1527 return -EINVAL;
1528 }
1529
1530 if (props[TIPC_NLA_PROP_TOL]) {
1531 u32 tol;
1532
1533 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1534 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1535 return -EINVAL;
1536 }
1537
1538 if (props[TIPC_NLA_PROP_WIN]) {
1539 u32 win;
1540
1541 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1542 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1543 return -EINVAL;
1544 }
1545
1546 return 0;
1547}
Richard Alpe7be57fc2014-11-20 10:29:12 +01001548
Richard Alpef96ce7a2014-11-20 10:29:13 +01001549int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
1550{
1551 int err;
1552 int res = 0;
1553 int bearer_id;
1554 char *name;
1555 struct tipc_link *link;
1556 struct tipc_node *node;
1557 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
Richard Alpe37e2d482015-02-09 09:50:08 +01001558 struct net *net = sock_net(skb->sk);
Richard Alpef96ce7a2014-11-20 10:29:13 +01001559
1560 if (!info->attrs[TIPC_NLA_LINK])
1561 return -EINVAL;
1562
1563 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1564 info->attrs[TIPC_NLA_LINK],
1565 tipc_nl_link_policy);
1566 if (err)
1567 return err;
1568
1569 if (!attrs[TIPC_NLA_LINK_NAME])
1570 return -EINVAL;
1571
1572 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1573
Richard Alpe670f4f82015-05-06 13:58:55 +02001574 if (strcmp(name, tipc_bclink_name) == 0)
1575 return tipc_nl_bc_link_set(net, attrs);
1576
Ying Xuef2f98002015-01-09 15:27:05 +08001577 node = tipc_link_find_owner(net, name, &bearer_id);
Richard Alpef96ce7a2014-11-20 10:29:13 +01001578 if (!node)
1579 return -EINVAL;
1580
1581 tipc_node_lock(node);
1582
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001583 link = node->links[bearer_id].link;
Richard Alpef96ce7a2014-11-20 10:29:13 +01001584 if (!link) {
1585 res = -EINVAL;
1586 goto out;
1587 }
1588
1589 if (attrs[TIPC_NLA_LINK_PROP]) {
1590 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1591
1592 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
1593 props);
1594 if (err) {
1595 res = err;
1596 goto out;
1597 }
1598
1599 if (props[TIPC_NLA_PROP_TOL]) {
1600 u32 tol;
1601
1602 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
Jon Paul Maloy8a1577c2015-07-16 16:54:29 -04001603 link->tolerance = tol;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001604 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0);
Richard Alpef96ce7a2014-11-20 10:29:13 +01001605 }
1606 if (props[TIPC_NLA_PROP_PRIO]) {
1607 u32 prio;
1608
1609 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1610 link->priority = prio;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001611 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio);
Richard Alpef96ce7a2014-11-20 10:29:13 +01001612 }
1613 if (props[TIPC_NLA_PROP_WIN]) {
1614 u32 win;
1615
1616 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1617 tipc_link_set_queue_limits(link, win);
1618 }
1619 }
1620
1621out:
1622 tipc_node_unlock(node);
1623
1624 return res;
1625}
Richard Alped8182802014-11-24 11:10:29 +01001626
1627static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001628{
1629 int i;
1630 struct nlattr *stats;
1631
1632 struct nla_map {
1633 u32 key;
1634 u32 val;
1635 };
1636
1637 struct nla_map map[] = {
1638 {TIPC_NLA_STATS_RX_INFO, s->recv_info},
1639 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1640 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1641 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1642 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1643 {TIPC_NLA_STATS_TX_INFO, s->sent_info},
1644 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1645 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1646 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1647 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1648 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1649 s->msg_length_counts : 1},
1650 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1651 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1652 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1653 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1654 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1655 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1656 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1657 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1658 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1659 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
1660 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1661 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1662 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1663 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
1664 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1665 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1666 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1667 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1668 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1669 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1670 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1671 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1672 (s->accu_queue_sz / s->queue_sz_counts) : 0}
1673 };
1674
1675 stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1676 if (!stats)
1677 return -EMSGSIZE;
1678
1679 for (i = 0; i < ARRAY_SIZE(map); i++)
1680 if (nla_put_u32(skb, map[i].key, map[i].val))
1681 goto msg_full;
1682
1683 nla_nest_end(skb, stats);
1684
1685 return 0;
1686msg_full:
1687 nla_nest_cancel(skb, stats);
1688
1689 return -EMSGSIZE;
1690}
1691
1692/* Caller should hold appropriate locks to protect the link */
Ying Xue34747532015-01-09 15:27:10 +08001693static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
Nicolas Dichtelf2f67392015-04-28 18:33:50 +02001694 struct tipc_link *link, int nlflags)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001695{
1696 int err;
1697 void *hdr;
1698 struct nlattr *attrs;
1699 struct nlattr *prop;
Ying Xue34747532015-01-09 15:27:10 +08001700 struct tipc_net *tn = net_generic(net, tipc_net_id);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001701
Richard Alpebfb3e5d2015-02-09 09:50:03 +01001702 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
Nicolas Dichtelf2f67392015-04-28 18:33:50 +02001703 nlflags, TIPC_NL_LINK_GET);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001704 if (!hdr)
1705 return -EMSGSIZE;
1706
1707 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1708 if (!attrs)
1709 goto msg_full;
1710
1711 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
1712 goto attr_msg_full;
1713 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
Ying Xue34747532015-01-09 15:27:10 +08001714 tipc_cluster_mask(tn->own_addr)))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001715 goto attr_msg_full;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04001716 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001717 goto attr_msg_full;
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001718 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001719 goto attr_msg_full;
Jon Paul Maloya97b9d32015-05-14 10:46:15 -04001720 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001721 goto attr_msg_full;
1722
1723 if (tipc_link_is_up(link))
1724 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1725 goto attr_msg_full;
1726 if (tipc_link_is_active(link))
1727 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
1728 goto attr_msg_full;
1729
1730 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1731 if (!prop)
1732 goto attr_msg_full;
1733 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1734 goto prop_msg_full;
1735 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
1736 goto prop_msg_full;
1737 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04001738 link->window))
Richard Alpe7be57fc2014-11-20 10:29:12 +01001739 goto prop_msg_full;
1742 nla_nest_end(msg->skb, prop);
1743
1744 err = __tipc_nl_add_stats(msg->skb, &link->stats);
1745 if (err)
1746 goto attr_msg_full;
1747
1748 nla_nest_end(msg->skb, attrs);
1749 genlmsg_end(msg->skb, hdr);
1750
1751 return 0;
1752
1753prop_msg_full:
1754 nla_nest_cancel(msg->skb, prop);
1755attr_msg_full:
1756 nla_nest_cancel(msg->skb, attrs);
1757msg_full:
1758 genlmsg_cancel(msg->skb, hdr);
1759
1760 return -EMSGSIZE;
1761}
1762
1763/* Caller should hold node lock */
Ying Xue34747532015-01-09 15:27:10 +08001764static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
1765 struct tipc_node *node, u32 *prev_link)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001766{
1767 u32 i;
1768 int err;
1769
1770 for (i = *prev_link; i < MAX_BEARERS; i++) {
1771 *prev_link = i;
1772
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001773 if (!node->links[i].link)
Richard Alpe7be57fc2014-11-20 10:29:12 +01001774 continue;
1775
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001776 err = __tipc_nl_add_link(net, msg,
1777 node->links[i].link, NLM_F_MULTI);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001778 if (err)
1779 return err;
1780 }
1781 *prev_link = 0;
1782
1783 return 0;
1784}
1785
1786int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
1787{
Ying Xuef2f98002015-01-09 15:27:05 +08001788 struct net *net = sock_net(skb->sk);
1789 struct tipc_net *tn = net_generic(net, tipc_net_id);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001790 struct tipc_node *node;
1791 struct tipc_nl_msg msg;
1792 u32 prev_node = cb->args[0];
1793 u32 prev_link = cb->args[1];
1794 int done = cb->args[2];
1795 int err;
1796
1797 if (done)
1798 return 0;
1799
1800 msg.skb = skb;
1801 msg.portid = NETLINK_CB(cb->skb).portid;
1802 msg.seq = cb->nlh->nlmsg_seq;
1803
1804 rcu_read_lock();
Richard Alpe7be57fc2014-11-20 10:29:12 +01001805 if (prev_node) {
Ying Xuef2f98002015-01-09 15:27:05 +08001806 node = tipc_node_find(net, prev_node);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001807 if (!node) {
 1808			/* We never set seq or call nl_dump_check_consistent();
 1809			 * this means that setting prev_seq here will cause the
 1810			 * consistency check to fail in the netlink callback
 1811			 * handler, resulting in the last NLMSG_DONE message
 1812			 * having the NLM_F_DUMP_INTR flag set.
 1813			 */
1814 cb->prev_seq = 1;
1815 goto out;
1816 }
Ying Xue8a0f6eb2015-03-26 18:10:24 +08001817 tipc_node_put(node);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001818
Ying Xuef2f98002015-01-09 15:27:05 +08001819 list_for_each_entry_continue_rcu(node, &tn->node_list,
1820 list) {
Richard Alpe7be57fc2014-11-20 10:29:12 +01001821 tipc_node_lock(node);
Ying Xue34747532015-01-09 15:27:10 +08001822 err = __tipc_nl_add_node_links(net, &msg, node,
1823 &prev_link);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001824 tipc_node_unlock(node);
1825 if (err)
1826 goto out;
1827
1828 prev_node = node->addr;
1829 }
1830 } else {
Ying Xue1da46562015-01-09 15:27:07 +08001831 err = tipc_nl_add_bc_link(net, &msg);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001832 if (err)
1833 goto out;
1834
Ying Xuef2f98002015-01-09 15:27:05 +08001835 list_for_each_entry_rcu(node, &tn->node_list, list) {
Richard Alpe7be57fc2014-11-20 10:29:12 +01001836 tipc_node_lock(node);
Ying Xue34747532015-01-09 15:27:10 +08001837 err = __tipc_nl_add_node_links(net, &msg, node,
1838 &prev_link);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001839 tipc_node_unlock(node);
1840 if (err)
1841 goto out;
1842
1843 prev_node = node->addr;
1844 }
1845 }
1846 done = 1;
1847out:
1848 rcu_read_unlock();
1849
1850 cb->args[0] = prev_node;
1851 cb->args[1] = prev_link;
1852 cb->args[2] = done;
1853
1854 return skb->len;
1855}
1856
1857int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
1858{
Ying Xuef2f98002015-01-09 15:27:05 +08001859 struct net *net = genl_info_net(info);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001860 struct tipc_nl_msg msg;
Richard Alpe7be57fc2014-11-20 10:29:12 +01001861 char *name;
Richard Alpe7be57fc2014-11-20 10:29:12 +01001862 int err;
1863
Richard Alpe7be57fc2014-11-20 10:29:12 +01001864 msg.portid = info->snd_portid;
1865 msg.seq = info->snd_seq;
1866
Richard Alpe670f4f82015-05-06 13:58:55 +02001867 if (!info->attrs[TIPC_NLA_LINK_NAME])
1868 return -EINVAL;
1869 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
1870
1871 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1872 if (!msg.skb)
1873 return -ENOMEM;
1874
1875 if (strcmp(name, tipc_bclink_name) == 0) {
1876 err = tipc_nl_add_bc_link(net, &msg);
1877 if (err) {
1878 nlmsg_free(msg.skb);
1879 return err;
1880 }
1881 } else {
1882 int bearer_id;
1883 struct tipc_node *node;
1884 struct tipc_link *link;
1885
1886 node = tipc_link_find_owner(net, name, &bearer_id);
1887 if (!node)
1888 return -EINVAL;
1889
1890 tipc_node_lock(node);
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001891 link = node->links[bearer_id].link;
Richard Alpe670f4f82015-05-06 13:58:55 +02001892 if (!link) {
1893 tipc_node_unlock(node);
1894 nlmsg_free(msg.skb);
1895 return -EINVAL;
1896 }
1897
1898 err = __tipc_nl_add_link(net, &msg, link, 0);
1899 tipc_node_unlock(node);
1900 if (err) {
1901 nlmsg_free(msg.skb);
1902 return err;
1903 }
Richard Alpe7be57fc2014-11-20 10:29:12 +01001904 }
1905
Richard Alpe670f4f82015-05-06 13:58:55 +02001906 return genlmsg_reply(msg.skb, info);
Richard Alpe7be57fc2014-11-20 10:29:12 +01001907}
Richard Alpeae363422014-11-20 10:29:14 +01001908
1909int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
1910{
1911 int err;
1912 char *link_name;
1913 unsigned int bearer_id;
1914 struct tipc_link *link;
1915 struct tipc_node *node;
1916 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
Richard Alpe18178772015-02-09 09:50:09 +01001917 struct net *net = sock_net(skb->sk);
Richard Alpeae363422014-11-20 10:29:14 +01001918
1919 if (!info->attrs[TIPC_NLA_LINK])
1920 return -EINVAL;
1921
1922 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1923 info->attrs[TIPC_NLA_LINK],
1924 tipc_nl_link_policy);
1925 if (err)
1926 return err;
1927
1928 if (!attrs[TIPC_NLA_LINK_NAME])
1929 return -EINVAL;
1930
1931 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1932
1933 if (strcmp(link_name, tipc_bclink_name) == 0) {
Ying Xue1da46562015-01-09 15:27:07 +08001934 err = tipc_bclink_reset_stats(net);
Richard Alpeae363422014-11-20 10:29:14 +01001935 if (err)
1936 return err;
1937 return 0;
1938 }
1939
Ying Xuef2f98002015-01-09 15:27:05 +08001940 node = tipc_link_find_owner(net, link_name, &bearer_id);
Richard Alpeae363422014-11-20 10:29:14 +01001941 if (!node)
1942 return -EINVAL;
1943
1944 tipc_node_lock(node);
1945
Jon Paul Maloy9d13ec62015-07-16 16:54:19 -04001946 link = node->links[bearer_id].link;
Richard Alpeae363422014-11-20 10:29:14 +01001947 if (!link) {
1948 tipc_node_unlock(node);
1949 return -EINVAL;
1950 }
1951
1952 link_reset_statistics(link);
1953
1954 tipc_node_unlock(node);
1955
1956 return 0;
1957}