/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
        u32 sent_pkts;
        u32 recv_pkts;
        u32 sent_states;
        u32 recv_states;
        u32 sent_probes;
        u32 recv_probes;
        u32 sent_nacks;
        u32 recv_nacks;
        u32 sent_acks;
        u32 sent_bundled;
        u32 sent_bundles;
        u32 recv_bundled;
        u32 recv_bundles;
        u32 retransmitted;
        u32 sent_fragmented;
        u32 sent_fragments;
        u32 recv_fragmented;
        u32 recv_fragments;
        u32 link_congs;            /* # port sends blocked by congestion */
        u32 deferred_recv;
        u32 duplicates;
        u32 max_queue_sz;          /* send queue size high water mark */
        u32 accu_queue_sz;         /* used for send queue size profiling */
        u32 queue_sz_counts;       /* used for send queue size profiling */
        u32 msg_length_counts;     /* used for message length profiling */
        u32 msg_lengths_total;     /* used for message length profiling */
        u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @last_retransmitted: sequence number of most recently retransmitted message
 * @stale_cnt: counter for number of identical retransmit attempts
 * @stale_limit: time when repeated identical retransmits must force link reset
 * @ackers: # of peers that needs to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saved OOS b'cast message received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
        u32 addr;
        char name[TIPC_MAX_LINK_NAME];
        struct net *net;

        /* Management and link supervision data */
        u16 peer_session;
        u16 session;
        u16 snd_nxt_state;
        u16 rcv_nxt_state;
        u32 peer_bearer_id;
        u32 bearer_id;
        u32 tolerance;
        u32 abort_limit;
        u32 state;
        u16 peer_caps;
        bool in_session;
        bool active;
        u32 silent_intv_cnt;
        char if_name[TIPC_MAX_IF_NAME];
        u32 priority;
        char net_plane;
        struct tipc_mon_state mon_state;
        u16 rst_cnt;

        /* Failover/synch */
        u16 drop_point;
        struct sk_buff *failover_reasm_skb;

        /* Max packet negotiation */
        u16 mtu;
        u16 advertised_mtu;

        /* Sending */
        struct sk_buff_head transmq;
        struct sk_buff_head backlogq;
        struct {
                u16 len;
                u16 limit;
        } backlog[5];
        u16 snd_nxt;
        u16 last_retransm;
        u16 window;
        u16 stale_cnt;
        unsigned long stale_limit;

        /* Reception */
        u16 rcv_nxt;
        u32 rcv_unacked;
        struct sk_buff_head deferdq;
        struct sk_buff_head *inputq;
        struct sk_buff_head *namedq;

        /* Congestion handling */
        struct sk_buff_head wakeupq;

        /* Fragmentation/reassembly */
        struct sk_buff *reasm_buf;

        /* Broadcast */
        u16 ackers;
        u16 acked;
        struct tipc_link *bc_rcvlink;
        struct tipc_link *bc_sndlink;
        unsigned long prev_retr;
        u16 prev_from;
        u16 prev_to;
        u8 nack_state;
        bool bc_peer_is_up;

        /* Statistics */
        struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
        BC_NACK_SND_CONDITIONAL,
        BC_NACK_SND_UNCONDITIONAL,
        BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIMIT 10   /* [ms] */

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Link FSM states:
 */
enum {
        LINK_ESTABLISHED  = 0xe,
        LINK_ESTABLISHING = 0xe << 4,
        LINK_RESET        = 0x1 << 8,
        LINK_RESETTING    = 0x2 << 12,
        LINK_PEER_RESET   = 0xd << 16,
        LINK_FAILINGOVER  = 0xf << 20,
        LINK_SYNCHING     = 0xc << 24
};
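
/* Each state above is placed in its own 4-bit field, so any combination of
 * states can be OR'ed into a mask and tested with a single AND, as done in
 * link_is_up(), tipc_link_is_reset() and tipc_link_is_blocked() below.
 */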

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
        return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      bool probe_reply, u16 rcvgap,
                                      int tolerance, int priority,
                                      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
                                        struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
        return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
        return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
        return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
        return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
        return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
        return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
        return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
        return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
        return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

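/* Note on the two helpers above: the broadcast send link is the only link
 * without a bc_sndlink pointer, and a broadcast receive link is the only
 * link that points back to itself through bc_rcvlink. A unicast link refers
 * to the peer-specific broadcast receive link through bc_rcvlink instead.
 */
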
void tipc_link_set_active(struct tipc_link *l, bool active)
{
        l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
        return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
        return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
        return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
        return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
        return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
        return l->net_plane;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
        l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
                           struct tipc_link *uc_l,
                           struct sk_buff_head *xmitq)
{
        struct tipc_link *rcv_l = uc_l->bc_rcvlink;

        snd_l->ackers++;
        rcv_l->acked = snd_l->snd_nxt - 1;
        snd_l->state = LINK_ESTABLISHED;
        tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
                              struct tipc_link *rcv_l,
                              struct sk_buff_head *xmitq)
{
        u16 ack = snd_l->snd_nxt - 1;

        snd_l->ackers--;
        rcv_l->bc_peer_is_up = true;
        rcv_l->state = LINK_ESTABLISHED;
        tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
        tipc_link_reset(rcv_l);
        rcv_l->state = LINK_RESET;
        if (!snd_l->ackers) {
                tipc_link_reset(snd_l);
                snd_l->state = LINK_RESET;
                __skb_queue_purge(xmitq);
        }
}

int tipc_link_bc_peers(struct tipc_link *l)
{
        return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
        struct sk_buff *skb = skb_peek(&l->deferdq);
        u16 gap = 0;

        if (more(l->snd_nxt, l->rcv_nxt))
                gap = l->snd_nxt - l->rcv_nxt;
        if (skb)
                gap = buf_seqno(skb) - l->rcv_nxt;
        return gap;
}

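/* Sequence numbers are 16 bit wide, so gap and window calculations like the
 * one above depend on the wrap-around aware helpers more()/less() (defined
 * in msg.h) rather than on plain '<' and '>' comparisons.
 */
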
void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
        l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
        return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
        return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
        return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
        return l->name;
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
                      int tolerance, char net_plane, u32 mtu, int priority,
                      int window, u32 session, u32 self,
                      u32 peer, u8 *peer_id, u16 peer_caps,
                      struct tipc_link *bc_sndlink,
                      struct tipc_link *bc_rcvlink,
                      struct sk_buff_head *inputq,
                      struct sk_buff_head *namedq,
                      struct tipc_link **link)
{
        char peer_str[NODE_ID_STR_LEN] = {0,};
        char self_str[NODE_ID_STR_LEN] = {0,};
        struct tipc_link *l;

        l = kzalloc(sizeof(*l), GFP_ATOMIC);
        if (!l)
                return false;
        *link = l;
        l->session = session;

        /* Set link name for unicast links only */
        if (peer_id) {
                tipc_nodeid2string(self_str, tipc_own_id(net));
                if (strlen(self_str) > 16)
                        sprintf(self_str, "%x", self);
                tipc_nodeid2string(peer_str, peer_id);
                if (strlen(peer_str) > 16)
                        sprintf(peer_str, "%x", peer);
        }
        /* Peer i/f name will be completed by reset/activate message */
        snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
                 self_str, if_name, peer_str);

        strcpy(l->if_name, if_name);
        l->addr = peer;
        l->peer_caps = peer_caps;
        l->net = net;
        l->in_session = false;
        l->bearer_id = bearer_id;
        l->tolerance = tolerance;
        l->net_plane = net_plane;
        l->advertised_mtu = mtu;
        l->mtu = mtu;
        l->priority = priority;
        tipc_link_set_queue_limits(l, window);
        l->ackers = 1;
        l->bc_sndlink = bc_sndlink;
        l->bc_rcvlink = bc_rcvlink;
        l->inputq = inputq;
        l->namedq = namedq;
        l->state = LINK_RESETTING;
        __skb_queue_head_init(&l->transmq);
        __skb_queue_head_init(&l->backlogq);
        __skb_queue_head_init(&l->deferdq);
        skb_queue_head_init(&l->wakeupq);
        skb_queue_head_init(l->inputq);
        return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used initially if no peers
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
                         int mtu, int window, u16 peer_caps,
                         struct sk_buff_head *inputq,
                         struct sk_buff_head *namedq,
                         struct tipc_link *bc_sndlink,
                         struct tipc_link **link)
{
        struct tipc_link *l;

        if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
                              0, ownnode, peer, NULL, peer_caps, bc_sndlink,
                              NULL, inputq, namedq, link))
                return false;

        l = *link;
        strcpy(l->name, tipc_bclink_name);
        tipc_link_reset(l);
        l->state = LINK_RESET;
        l->ackers = 0;
        l->bc_rcvlink = l;

        /* Broadcast send link is always up */
        if (link_is_bc_sndlink(l))
                l->state = LINK_ESTABLISHED;

        /* Disable replicast if even a single peer doesn't support it */
        if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
                tipc_bcast_disable_rcast(net);

        return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
        int rc = 0;

        switch (l->state) {
        case LINK_RESETTING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_RESET:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                case LINK_FAILURE_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_PEER_RESET:
                switch (evt) {
                case LINK_RESET_EVT:
                        l->state = LINK_ESTABLISHING;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_FAILINGOVER:
                switch (evt) {
                case LINK_FAILOVER_END_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_PEER_RESET_EVT:
                case LINK_RESET_EVT:
                case LINK_ESTABLISH_EVT:
                case LINK_FAILURE_EVT:
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHING:
                switch (evt) {
                case LINK_ESTABLISH_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                        l->state = LINK_FAILINGOVER;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_FAILURE_EVT:
                case LINK_PEER_RESET_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_ESTABLISHED:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_END_EVT:
                        break;
                case LINK_SYNCH_BEGIN_EVT:
                        l->state = LINK_SYNCHING;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        case LINK_SYNCHING:
                switch (evt) {
                case LINK_PEER_RESET_EVT:
                        l->state = LINK_PEER_RESET;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_FAILURE_EVT:
                        l->state = LINK_RESETTING;
                        rc |= TIPC_LINK_DOWN_EVT;
                        break;
                case LINK_RESET_EVT:
                        l->state = LINK_RESET;
                        break;
                case LINK_ESTABLISH_EVT:
                case LINK_SYNCH_BEGIN_EVT:
                        break;
                case LINK_SYNCH_END_EVT:
                        l->state = LINK_ESTABLISHED;
                        break;
                case LINK_FAILOVER_BEGIN_EVT:
                case LINK_FAILOVER_END_EVT:
                default:
                        goto illegal_evt;
                }
                break;
        default:
                pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
        }
        return rc;
illegal_evt:
        pr_err("Illegal FSM event %x in state %x on link %s\n",
               evt, l->state, l->name);
        return rc;
}

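/* tipc_link_fsm_evt() above only moves the state variable; it touches no
 * queues. For failures detected in the ESTABLISHED and SYNCHING states it
 * additionally sets TIPC_LINK_DOWN_EVT in the return code so that the
 * caller can take the link down.
 */
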
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
        struct sk_buff *skb;
        struct tipc_msg *msg;
        int length;

        /* Update counters used in statistical profiling of send traffic */
        l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
        l->stats.queue_sz_counts++;

        skb = skb_peek(&l->transmq);
        if (!skb)
                return;
        msg = buf_msg(skb);
        length = msg_size(msg);

        if (msg_user(msg) == MSG_FRAGMENTER) {
                if (msg_type(msg) != FIRST_FRAGMENT)
                        return;
                length = msg_size(msg_get_wrapped(msg));
        }
        l->stats.msg_lengths_total += length;
        l->stats.msg_length_counts++;
        if (length <= 64)
                l->stats.msg_length_profile[0]++;
        else if (length <= 256)
                l->stats.msg_length_profile[1]++;
        else if (length <= 1024)
                l->stats.msg_length_profile[2]++;
        else if (length <= 4096)
                l->stats.msg_length_profile[3]++;
        else if (length <= 16384)
                l->stats.msg_length_profile[4]++;
        else if (length <= 32768)
                l->stats.msg_length_profile[5]++;
        else
                l->stats.msg_length_profile[6]++;
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        int mtyp = 0;
        int rc = 0;
        bool state = false;
        bool probe = false;
        bool setup = false;
        u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
        u16 bc_acked = l->bc_rcvlink->acked;
        struct tipc_mon_state *mstate = &l->mon_state;

        switch (l->state) {
        case LINK_ESTABLISHED:
        case LINK_SYNCHING:
                mtyp = STATE_MSG;
                link_profile_stats(l);
                tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
                if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                state = bc_acked != bc_snt;
                state |= l->bc_rcvlink->rcv_unacked;
                state |= l->rcv_unacked;
                state |= !skb_queue_empty(&l->transmq);
                state |= !skb_queue_empty(&l->deferdq);
                probe = mstate->probing;
                probe |= l->silent_intv_cnt;
                if (probe || mstate->monitoring)
                        l->silent_intv_cnt++;
                break;
        case LINK_RESET:
                setup = l->rst_cnt++ <= 4;
                setup |= !(l->rst_cnt % 16);
                mtyp = RESET_MSG;
                break;
        case LINK_ESTABLISHING:
                setup = true;
                mtyp = ACTIVATE_MSG;
                break;
        case LINK_PEER_RESET:
        case LINK_RESETTING:
        case LINK_FAILINGOVER:
                break;
        default:
                break;
        }

        if (state || probe || setup)
                tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

        return rc;
}

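/* Per timer tick, tipc_link_timeout() emits at most one protocol message:
 * a STATE message when there is outstanding or deferred traffic or the
 * monitor requests a probe, and RESET/ACTIVATE retries while the link is
 * coming up - on each of the first five ticks, then only every 16th tick.
 */
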
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
        u32 dnode = tipc_own_addr(l->net);
        u32 dport = msg_origport(hdr);
        struct sk_buff *skb;

        /* Create and schedule wakeup pseudo message */
        skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                              dnode, l->addr, dport, 0, 0);
        if (!skb)
                return -ENOBUFS;
        msg_set_dest_droppable(buf_msg(skb), true);
        TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
        skb_queue_tail(&l->wakeupq, skb);
        l->stats.link_congs++;
        return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
        struct sk_buff *skb, *tmp;
        int imp, i = 0;

        skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
                imp = TIPC_SKB_CB(skb)->chain_imp;
                if (l->backlog[imp].len < l->backlog[imp].limit) {
                        skb_unlink(skb, &l->wakeupq);
                        skb_queue_tail(l->inputq, skb);
                } else if (i++ > 10) {
                        break;
                }
        }
}

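/* In link_prepare_wakeup() above, a blocked sender is woken only when the
 * backlog level matching its message importance has room again, and the
 * scan gives up after passing more than ten still-congested entries; the
 * rest of the queue is retried on a later call.
 */
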
void tipc_link_reset(struct tipc_link *l)
{
        l->in_session = false;
        l->session++;
        l->mtu = l->advertised_mtu;
        spin_lock_bh(&l->wakeupq.lock);
        spin_lock_bh(&l->inputq->lock);
        skb_queue_splice_init(&l->wakeupq, l->inputq);
        spin_unlock_bh(&l->inputq->lock);
        spin_unlock_bh(&l->wakeupq.lock);

        __skb_queue_purge(&l->transmq);
        __skb_queue_purge(&l->deferdq);
        __skb_queue_purge(&l->backlogq);
        l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
        l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
        l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
        l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
        l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
        kfree_skb(l->reasm_buf);
        kfree_skb(l->failover_reasm_skb);
        l->reasm_buf = NULL;
        l->failover_reasm_skb = NULL;
        l->rcv_unacked = 0;
        l->snd_nxt = 1;
        l->rcv_nxt = 1;
        l->snd_nxt_state = 1;
        l->rcv_nxt_state = 1;
        l->acked = 0;
        l->silent_intv_cnt = 0;
        l->rst_cnt = 0;
        l->stale_cnt = 0;
        l->bc_peer_is_up = false;
        memset(&l->mon_state, 0, sizeof(l->mon_state));
        tipc_link_reset_stats(l);
}

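/* Note that tipc_link_reset() splices any pending wakeup messages into the
 * input queue instead of dropping them, so senders blocked on congestion
 * are still woken up when the link goes down.
 */
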
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
{
        struct tipc_msg *hdr = buf_msg(skb_peek(list));
        unsigned int maxwin = l->window;
        int imp = msg_importance(hdr);
        unsigned int mtu = l->mtu;
        u16 ack = l->rcv_nxt - 1;
        u16 seqno = l->snd_nxt;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        struct sk_buff_head *transmq = &l->transmq;
        struct sk_buff_head *backlogq = &l->backlogq;
        struct sk_buff *skb, *_skb, *bskb;
        int pkt_cnt = skb_queue_len(list);
        int rc = 0;

        if (unlikely(msg_size(hdr) > mtu)) {
                skb_queue_purge(list);
                return -EMSGSIZE;
        }

        /* Allow oversubscription of one data msg per source at congestion */
        if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
                if (imp == TIPC_SYSTEM_IMPORTANCE) {
                        pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
                        return -ENOBUFS;
                }
                rc = link_schedule_user(l, hdr);
        }

        if (pkt_cnt > 1) {
                l->stats.sent_fragmented++;
                l->stats.sent_fragments += pkt_cnt;
        }

        /* Prepare each packet for sending, and add to relevant queue: */
        while (skb_queue_len(list)) {
                skb = skb_peek(list);
                hdr = buf_msg(skb);
                msg_set_seqno(hdr, seqno);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_ack);

                if (likely(skb_queue_len(transmq) < maxwin)) {
                        _skb = skb_clone(skb, GFP_ATOMIC);
                        if (!_skb) {
                                skb_queue_purge(list);
                                return -ENOBUFS;
                        }
                        __skb_dequeue(list);
                        __skb_queue_tail(transmq, skb);
                        __skb_queue_tail(xmitq, _skb);
                        TIPC_SKB_CB(skb)->ackers = l->ackers;
                        l->rcv_unacked = 0;
                        l->stats.sent_pkts++;
                        seqno++;
                        continue;
                }
                if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
                        kfree_skb(__skb_dequeue(list));
                        l->stats.sent_bundled++;
                        continue;
                }
                if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
                        kfree_skb(__skb_dequeue(list));
                        __skb_queue_tail(backlogq, bskb);
                        l->backlog[msg_importance(buf_msg(bskb))].len++;
                        l->stats.sent_bundled++;
                        l->stats.sent_bundles++;
                        continue;
                }
                l->backlog[imp].len += skb_queue_len(list);
                skb_queue_splice_tail_init(list, backlogq);
        }
        l->snd_nxt = seqno;
        return rc;
}

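/* A return value of -ELINKCONG from tipc_link_xmit() does not mean the
 * chain was rejected: except for the TIPC_SYSTEM_IMPORTANCE overflow case
 * (-ENOBUFS), the message is still queued, and the sender is expected to
 * block until the wakeup pseudo message created by link_schedule_user()
 * is delivered.
 */
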
static void tipc_link_advance_backlog(struct tipc_link *l,
                                      struct sk_buff_head *xmitq)
{
        struct sk_buff *skb, *_skb;
        struct tipc_msg *hdr;
        u16 seqno = l->snd_nxt;
        u16 ack = l->rcv_nxt - 1;
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

        while (skb_queue_len(&l->transmq) < l->window) {
                skb = skb_peek(&l->backlogq);
                if (!skb)
                        break;
                _skb = skb_clone(skb, GFP_ATOMIC);
                if (!_skb)
                        break;
                __skb_dequeue(&l->backlogq);
                hdr = buf_msg(skb);
                l->backlog[msg_importance(hdr)].len--;
                __skb_queue_tail(&l->transmq, skb);
                __skb_queue_tail(xmitq, _skb);
                TIPC_SKB_CB(skb)->ackers = l->ackers;
                msg_set_seqno(hdr, seqno);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_ack);
                l->rcv_unacked = 0;
                l->stats.sent_pkts++;
                seqno++;
        }
        l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
        struct tipc_msg *hdr = buf_msg(skb);

        pr_warn("Retransmission failure on link <%s>\n", l->name);
        link_print(l, "State of link ");
        pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
                msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
        pr_info("sqno %u, prev: %x, src: %x\n",
                msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

/* tipc_link_retrans() - retransmit one or more packets
 * @l: the link to transmit on
 * @r: the receiving link ordering the retransmit. Same as l if unicast
 * @from: retransmit from (inclusive) this sequence number
 * @to: retransmit to (inclusive) this sequence number
 * xmitq: queue for accumulating the retransmitted packets
 */
static int tipc_link_retrans(struct tipc_link *l, struct tipc_link *r,
                             u16 from, u16 to, struct sk_buff_head *xmitq)
{
        struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
        u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
        u16 ack = l->rcv_nxt - 1;
        struct tipc_msg *hdr;

        if (!skb)
                return 0;

        /* Detect repeated retransmit failures on same packet */
        if (r->last_retransm != buf_seqno(skb)) {
                r->last_retransm = buf_seqno(skb);
                r->stale_limit = jiffies + msecs_to_jiffies(l->tolerance);
        } else if (++r->stale_cnt > 99 && time_after(jiffies, r->stale_limit)) {
                link_retransmit_failure(l, skb);
                if (link_is_bc_sndlink(l))
                        return TIPC_LINK_DOWN_EVT;
                return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
        }

        skb_queue_walk(&l->transmq, skb) {
                hdr = buf_msg(skb);
                if (less(msg_seqno(hdr), from))
                        continue;
                if (more(msg_seqno(hdr), to))
                        break;
                _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
                if (!_skb)
                        return 0;
                hdr = buf_msg(_skb);
                msg_set_ack(hdr, ack);
                msg_set_bcast_ack(hdr, bc_ack);
                _skb->priority = TC_PRIO_CONTROL;
                __skb_queue_tail(xmitq, _skb);
                l->stats.retransmitted++;
        }
        return 0;
}

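/* The stale-retransmit check in tipc_link_retrans() above declares failure
 * only when the same packet has headed the transmit queue for more than 99
 * consecutive retransmit requests and the link tolerance time has passed;
 * neither condition alone brings the link down.
 */
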
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
                            struct sk_buff_head *inputq)
{
        struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
        struct tipc_msg *hdr = buf_msg(skb);

        switch (msg_user(hdr)) {
        case TIPC_LOW_IMPORTANCE:
        case TIPC_MEDIUM_IMPORTANCE:
        case TIPC_HIGH_IMPORTANCE:
        case TIPC_CRITICAL_IMPORTANCE:
                if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
                        skb_queue_tail(mc_inputq, skb);
                        return true;
                }
                /* else: fall through */
        case CONN_MANAGER:
                skb_queue_tail(inputq, skb);
                return true;
        case GROUP_PROTOCOL:
                skb_queue_tail(mc_inputq, skb);
                return true;
        case NAME_DISTRIBUTOR:
                l->bc_rcvlink->state = LINK_ESTABLISHED;
                skb_queue_tail(l->namedq, skb);
                return true;
        case MSG_BUNDLER:
        case TUNNEL_PROTOCOL:
        case MSG_FRAGMENTER:
        case BCAST_PROTOCOL:
                return false;
        default:
                pr_warn("Dropping received illegal msg type\n");
                kfree_skb(skb);
                return false;
        };
}

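/* A 'true' return from tipc_data_input() means the buffer was consumed,
 * i.e. queued towards a socket or the name table; 'false' means it is a
 * link-internal message type left for tipc_link_input() to process.
 */
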
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
                           struct sk_buff_head *inputq)
{
        struct tipc_msg *hdr = buf_msg(skb);
        struct sk_buff **reasm_skb = &l->reasm_buf;
        struct sk_buff *iskb;
        struct sk_buff_head tmpq;
        int usr = msg_user(hdr);
        int rc = 0;
        int pos = 0;
        int ipos = 0;

        if (unlikely(usr == TUNNEL_PROTOCOL)) {
                if (msg_type(hdr) == SYNCH_MSG) {
                        __skb_queue_purge(&l->deferdq);
                        goto drop;
                }
                if (!tipc_msg_extract(skb, &iskb, &ipos))
                        return rc;
                kfree_skb(skb);
                skb = iskb;
                hdr = buf_msg(skb);
                if (less(msg_seqno(hdr), l->drop_point))
                        goto drop;
                if (tipc_data_input(l, skb, inputq))
                        return rc;
                usr = msg_user(hdr);
                reasm_skb = &l->failover_reasm_skb;
        }

        if (usr == MSG_BUNDLER) {
                skb_queue_head_init(&tmpq);
                l->stats.recv_bundles++;
                l->stats.recv_bundled += msg_msgcnt(hdr);
                while (tipc_msg_extract(skb, &iskb, &pos))
                        tipc_data_input(l, iskb, &tmpq);
                tipc_skb_queue_splice_tail(&tmpq, inputq);
                return 0;
        } else if (usr == MSG_FRAGMENTER) {
                l->stats.recv_fragments++;
                if (tipc_buf_append(reasm_skb, &skb)) {
                        l->stats.recv_fragmented++;
                        tipc_data_input(l, skb, inputq);
                } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
                        pr_warn_ratelimited("Unable to build fragment list\n");
                        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
                }
                return 0;
        } else if (usr == BCAST_PROTOCOL) {
                tipc_bcast_lock(l->net);
                tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
                tipc_bcast_unlock(l->net);
        }
drop:
        kfree_skb(skb);
        return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
        bool released = false;
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&l->transmq, skb, tmp) {
                if (more(buf_seqno(skb), acked))
                        break;
                __skb_unlink(skb, &l->transmq);
                kfree_skb(skb);
                released = true;
        }
        return released;
}

/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        if (!l)
                return 0;

        /* Broadcast ACK must be sent via a unicast link => defer to caller */
        if (link_is_bc_rcvlink(l)) {
                if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
                        return 0;
                l->rcv_unacked = 0;

                /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
                l->snd_nxt = l->rcv_nxt;
                return TIPC_LINK_SND_STATE;
        }

        /* Unicast ACK */
        l->rcv_unacked = 0;
        l->stats.sent_acks++;
        tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
        return 0;
}

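/* The ((rcv_nxt ^ own_addr) & 0xf) test above means that a broadcast
 * receiver acknowledges only every 16th packet, and that nodes with
 * different low address nibbles do so at different packet numbers, which
 * staggers the acks among the receivers.
 */
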
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
        int mtyp = RESET_MSG;
        struct sk_buff *skb;

        if (l->state == LINK_ESTABLISHING)
                mtyp = ACTIVATE_MSG;

        tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

        /* Inform peer that this endpoint is going down if applicable */
        skb = skb_peek_tail(xmitq);
        if (skb && (l->state == LINK_RESET))
                msg_set_peer_stopping(buf_msg(skb), 1);
}

Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001222/* tipc_link_build_nack_msg: prepare link nack message for transmission
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001223 * Note that sending of broadcast NACK is coordinated among nodes, to
1224 * reduce the risk of NACK storms towards the sender
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001225 */
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001226static int tipc_link_build_nack_msg(struct tipc_link *l,
1227 struct sk_buff_head *xmitq)
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001228{
1229 u32 def_cnt = ++l->stats.deferred_recv;
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001230 int match1, match2;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001231
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001232 if (link_is_bc_rcvlink(l)) {
1233 match1 = def_cnt & 0xf;
1234 match2 = tipc_own_addr(l->net) & 0xf;
1235 if (match1 == match2)
1236 return TIPC_LINK_SND_STATE;
1237 return 0;
1238 }
Jon Paul Maloy52666982015-10-22 08:51:41 -04001239
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001240 if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001241 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001242 return 0;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001243}
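
/* Worked example for the broadcast branch above (illustrative only): a
 * node whose address ends in nibble 0x3 returns TIPC_LINK_SND_STATE only
 * when its deferred-receive counter also ends in 0x3, i.e. for one out
 * of every 16 deferred packets. Assuming peer addresses are reasonably
 * spread over the 16 nibble values, roughly one node NACKs per lost
 * packet instead of all of them at once.
 */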

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			l->stale_cnt = 0;
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			rc |= tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_pkts++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct tipc_link *bcl = l->bc_rcvlink;
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff_head *dfq = &l->deferdq;
	bool node_up = link_is_up(bcl);
	struct tipc_mon_state *mstate = &l->mon_state;
	int dlen = 0;
	void *data;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if (!skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      tipc_max_domain_size, l->addr,
			      tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	data = msg_data(hdr);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_ack_invalid(hdr, !node_up);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
			msg_set_seqno(hdr, l->snd_nxt_state++);
		msg_set_seq_gap(hdr, rcvgap);
		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
		msg_set_probe(hdr, probe);
		msg_set_is_keepalive(hdr, probe || probe_reply);
		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
		msg_set_size(hdr, INT_H_SIZE + dlen);
		skb_trim(skb, INT_H_SIZE + dlen);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(data, l->if_name);
		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}
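
/* Note on the default seqno above (a rationale inferred from the code,
 * not stated in it): for peers without TIPC_LINK_PROTO_SEQNO support,
 * protocol messages keep seqno snd_nxt + U16_MAX / 2, which lies as far
 * outside the receive window as possible; such messages can then never
 * be mistaken for in-window data packets by the receiving endpoint.
 */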

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}
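
/* Usage sketch (hypothetical caller, for illustration only): during
 * failover the node layer is assumed to invoke
 *
 *	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
 *
 * after which xmitq holds TUNNEL_PROTOCOL packets, each wrapping one
 * packet from the failed link's transmq/backlogq, ready to be sent out
 * on the bearer of the surviving link tnl.
 */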

/* tipc_link_validate_msg(): validate message against current link state
 * Returns true if message should be accepted, otherwise false
 */
bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
{
	u16 curr_session = l->peer_session;
	u16 session = msg_session(hdr);
	int mtyp = msg_type(hdr);

	if (msg_user(hdr) != LINK_PROTOCOL)
		return true;

	switch (mtyp) {
	case RESET_MSG:
		if (!l->in_session)
			return true;
		/* Accept only RESET with new session number */
		return more(session, curr_session);
	case ACTIVATE_MSG:
		if (!l->in_session)
			return true;
		/* Accept only ACTIVATE with new or current session number */
		return !less(session, curr_session);
	case STATE_MSG:
		/* Accept only STATE with current session number */
		if (!l->in_session)
			return false;
		if (session != curr_session)
			return false;
		if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
			return true;
		/* Accept only STATE with new sequence number */
		return !less(msg_seqno(hdr), l->rcv_nxt_state);
	default:
		return false;
	}
}
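
/* Worked example (mod-2^16 comparison semantics assumed): with
 * l->peer_session == 0xfffe, a RESET carrying session 0x0001 is accepted
 * since more() treats it as newer across the wrap, while a replayed
 * RESET with session 0xfffe is rejected. An ACTIVATE carrying the
 * current session 0xfffe is still accepted, as activation may repeat
 * legitimately within one session.
 */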

/* tipc_link_proto_rcv(): receive link level protocol message
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	u16 dlen = msg_data_sz(hdr);
	int mtyp = msg_type(hdr);
	bool reply = msg_probe(hdr);
	void *data;
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	skb_linearize(skb);
	hdr = buf_msg(skb);
	data = msg_data(hdr);

	if (!tipc_link_validate_msg(l, hdr))
		goto exit;

	switch (mtyp) {
	case RESET_MSG:
	case ACTIVATE_MSG:
		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, data, TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (msg_peer_stopping(hdr))
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		else if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->in_session = true;
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:
		l->rcv_nxt_state = msg_seqno(hdr) + 1;

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own prio if peer indicates a different value */
		if ((peers_prio != l->priority) &&
		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}
		tipc_mon_rcv(l->net, data, dlen, l->addr,
			     &l->mon_state, l->bearer_id);

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || reply)
			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
						  rcvgap, 0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}

/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
	tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* link_bc_retr_eval() - check if the indicated range can be retransmitted now
 * - Adjust permitted range if there is overlap with previous retransmission
 */
static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
{
	unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);

	if (less(*to, *from))
		return false;

	/* New retransmission request */
	if ((elapsed > TIPC_BC_RETR_LIMIT) ||
	    less(*to, l->prev_from) || more(*from, l->prev_to)) {
		l->prev_from = *from;
		l->prev_to = *to;
		l->prev_retr = jiffies;
		return true;
	}

	/* Inside range of previous retransmit */
	if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
		return false;

	/* Fully or partially outside previous range => exclude overlap */
	if (less(*from, l->prev_from)) {
		*to = l->prev_from - 1;
		l->prev_from = *from;
	}
	if (more(*to, l->prev_to)) {
		*from = l->prev_to + 1;
		l->prev_to = *to;
	}
	l->prev_retr = jiffies;
	return true;
}
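
/* Worked example of the trimming above (illustrative only): with a
 * previous retransmit of [100, 120] still within TIPC_BC_RETR_LIMIT ms,
 * a new request for [90, 110] is trimmed to [90, 99]; only the part not
 * covered by the previous round is sent again, and prev_from moves down
 * to 90 so that an identical follow-up request is suppressed entirely.
 */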

/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			  struct sk_buff_head *xmitq)
{
	struct tipc_link *snd_l = l->bc_sndlink;
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
	u16 from = msg_bcast_ack(hdr) + 1;
	u16 to = from + msg_bc_gap(hdr) - 1;
	int rc = 0;

	if (!link_is_up(l))
		return rc;

	if (!msg_peer_node_is_up(hdr))
		return rc;

	/* Open when peer acknowledges our bcast init msg (pkt #1) */
	if (msg_ack(hdr))
		l->bc_peer_is_up = true;

	if (!l->bc_peer_is_up)
		return rc;

	l->stats.recv_nacks++;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return rc;

	if (link_bc_retr_eval(snd_l, &from, &to))
		rc = tipc_link_retrans(snd_l, l, from, to, xmitq);

	l->snd_nxt = peers_snd_nxt;
	if (link_bc_rcv_gap(l))
		rc |= TIPC_LINK_SND_STATE;

	/* Return now if sender supports nack via STATE messages */
	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
		return rc;

	/* Otherwise, be backwards compatible */

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return 0;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return 0;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return 0;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
	return 0;
}

void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}
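
/* Note (descriptive): TIPC_SKB_CB(skb)->ackers is assumed to count the
 * broadcast peers that still have to acknowledge this packet. Each
 * peer's ack decrements the count; the buffer is unlinked and freed only
 * when the last outstanding peer has acked, so one transmq copy serves
 * all broadcast receivers.
 */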

/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is here for backwards compatibility, since
 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
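
/* Worked example: tipc_link_set_queue_limits(l, 50) yields backlog
 * limits of 50/100/150/200 packets for LOW through CRITICAL importance.
 * The SYSTEM limit instead scales inversely with the MTU, sized (on the
 * assumption that ITEM_SIZE is the per-publication wire size) so that
 * one full bulk name-table distribution always fits in the backlog.
 */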

/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}
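
/* Usage sketch (hypothetical caller, names assumed): a netlink handler
 * would validate link properties roughly like this before applying them:
 *
 *	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
 *	u32 tol;
 *	int err;
 *
 *	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
 *	if (err)
 *		return err;
 *	if (props[TIPC_NLA_PROP_TOL])
 *		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
 */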

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
		 s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
		 (s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
		 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}