/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"
#include "trace.h"
#include "crypto.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_pkts;
	u32 recv_pkts;
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue of out-of-sequence broadcast messages received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u16 peer_session;
	u16 session;
	u16 snd_nxt_state;
	u16 rcv_nxt_state;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool in_session;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;
	struct sk_buff_head failover_deferdq;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
		struct sk_buff *target_bskb;
	} backlog[5];
	u16 snd_nxt;
	u16 window;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;
	struct sk_buff *reasm_tnlmsg;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	u8 nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

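/* Earliest point in time at which a given packet may be (re)transmitted
 * again, expressed as an absolute jiffies value: 10 ms between broadcast
 * retransmissions, 1 ms between unicast retransmissions.
 */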
#define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED     = 0xe,
	LINK_ESTABLISHING    = 0xe  << 4,
	LINK_RESET           = 0x1  << 8,
	LINK_RESETTING       = 0x2  << 12,
	LINK_PEER_RESET      = 0xd  << 16,
	LINK_FAILINGOVER     = 0xf  << 20,
	LINK_SYNCHING        = 0xc  << 24
};
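
/* The state values above occupy disjoint bit ranges (one nibble each), so
 * membership in a group of states can be tested with a single bitwise AND,
 * as in link_is_up() and tipc_link_is_blocked() below.
 */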

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data);
static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

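/* The broadcast send link is the only link without a bc_sndlink backpointer;
 * a broadcast receive link is recognized by pointing to itself as bc_rcvlink.
 */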
static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
	l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

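/* link_bc_rcv_gap(): number of broadcast packets known to be outstanding but
 * not yet received, i.e. the distance from the next expected sequence number
 * to the first deferred packet, or to the sender's next sequence number if
 * nothing is deferred.
 */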
static u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

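/* tipc_link_mss(): maximum message segment size usable on this link, i.e.
 * the link MTU minus the internal header, and minus the extra encryption
 * overhead when TIPC_CRYPTO is enabled.
 */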
int tipc_link_mss(struct tipc_link *l)
{
#ifdef CONFIG_TIPC_CRYPTO
	return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
#else
	return l->mtu - INT_H_SIZE;
#endif
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
	return l->state;
}

/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A, B, C, ...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 self,
		      u32 peer, u8 *peer_id, u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	char peer_str[NODE_ID_STR_LEN] = {0,};
	char self_str[NODE_ID_STR_LEN] = {0,};
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Set link name for unicast links only */
	if (peer_id) {
		tipc_nodeid2string(self_str, tipc_own_id(net));
		if (strlen(self_str) > 16)
			sprintf(self_str, "%x", self);
		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
	}
	/* Peer i/f name will be completed by reset/activate message */
	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
		 self_str, if_name, peer_str);

	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->in_session = false;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	if (bc_rcvlink)
		bc_rcvlink->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	__skb_queue_head_init(&l->failover_deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used initially if no peers
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_toggle_rcast(net, false);

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;
	int old_state = l->state;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
}

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_inner_hdr(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/**
 * tipc_link_too_silent - check if link is "too silent"
 * @l: tipc link to be checked
 *
 * Returns true if the link 'silent_intv_cnt' is about to reach the
 * 'abort_limit' value, otherwise false
 */
bool tipc_link_too_silent(struct tipc_link *l)
{
	return (l->silent_intv_cnt + 2 > l->abort_limit);
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		state |= !skb_queue_empty(&l->deferdq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
	return -ELINKCONG;
}

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff_head *wakeupq = &l->wakeupq;
	struct sk_buff_head *inputq = l->inputq;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head tmpq;
	int avail[5] = {0,};
	int imp = 0;

	__skb_queue_head_init(&tmpq);

	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;

	skb_queue_walk_safe(wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (avail[imp] <= 0)
			continue;
		avail[imp]--;
		__skb_unlink(skb, wakeupq);
		__skb_queue_tail(&tmpq, skb);
	}

	spin_lock_bh(&inputq->lock);
	skb_queue_splice_tail(&tmpq, inputq);
	spin_unlock_bh(&inputq->lock);
}

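/* tipc_link_reset(): bring the link back to its initial state: pending
 * wakeup messages are handed over to the owning sockets' input queue, all
 * packet queues are flushed, and sequence numbers, counters and monitor
 * state are reset.
 */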
void tipc_link_reset(struct tipc_link *l)
{
	struct sk_buff_head list;
	u32 imp;

	__skb_queue_head_init(&list);

	l->in_session = false;
	/* Force re-synch of peer session number before establishing */
	l->peer_session--;
	l->session++;
	l->mtu = l->advertised_mtu;

	spin_lock_bh(&l->wakeupq.lock);
	skb_queue_splice_init(&l->wakeupq, &list);
	spin_unlock_bh(&l->wakeupq.lock);

	spin_lock_bh(&l->inputq->lock);
	skb_queue_splice_init(&list, l->inputq);
	spin_unlock_bh(&l->inputq->lock);

	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	__skb_queue_purge(&l->backlogq);
	__skb_queue_purge(&l->failover_deferdq);
	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
		l->backlog[imp].len = 0;
		l->backlog[imp].target_bskb = NULL;
	}
	kfree_skb(l->reasm_buf);
	kfree_skb(l->reasm_tnlmsg);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->reasm_tnlmsg = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->snd_nxt_state = 1;
	l->rcv_nxt_state = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	int pkt_cnt = skb_queue_len(list);
	int imp = msg_importance(hdr);
	unsigned int mss = tipc_link_mss(l);
	unsigned int maxwin = l->window;
	unsigned int mtu = l->mtu;
	bool new_bundle;
	int rc = 0;

	if (unlikely(msg_size(hdr) > mtu)) {
		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
			skb_queue_len(list), msg_user(hdr),
			msg_type(hdr), msg_size(hdr), mtu);
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while ((skb = __skb_dequeue(list))) {
		if (likely(skb_queue_len(transmq) < maxwin)) {
			hdr = buf_msg(skb);
			msg_set_seqno(hdr, seqno);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				kfree_skb(skb);
				__skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_queue_tail(transmq, skb);
			/* next retransmit attempt */
			if (link_is_bc_sndlink(l))
				TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
					mss, l->addr, &new_bundle)) {
			if (skb) {
				/* Keep a ref. to the skb for next try */
				l->backlog[imp].target_bskb = skb;
				l->backlog[imp].len++;
				__skb_queue_tail(backlogq, skb);
			} else {
				if (new_bundle) {
					l->stats.sent_bundles++;
					l->stats.sent_bundled++;
				}
				l->stats.sent_bundled++;
			}
			continue;
		}
		l->backlog[imp].target_bskb = NULL;
		l->backlog[imp].len += (1 + skb_queue_len(list));
		__skb_queue_tail(backlogq, skb);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}

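/* tipc_link_advance_backlog(): move packets from the backlog queue to the
 * transmit queue, as far as the send window allows, and queue clones of
 * them on 'xmitq' for transmission by the caller.
 */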
static void tipc_link_advance_backlog(struct tipc_link *l,
				      struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u32 imp;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		imp = msg_importance(hdr);
		l->backlog[imp].len--;
		if (unlikely(skb == l->backlog[imp].target_bskb))
			l->backlog[imp].target_bskb = NULL;
		__skb_queue_tail(&l->transmq, skb);
		/* next retransmit attempt */
		if (link_is_bc_sndlink(l))
			TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;

		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}

/**
 * link_retransmit_failure() - Detect repeated retransmit failures
 * @l: tipc link sender
 * @r: tipc link receiver (= l in case of unicast)
 * @rc: returned code
 *
 * Return: true if repeated retransmit failures have occurred, otherwise
 * false
 */
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
				    int *rc)
{
	struct sk_buff *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return false;

	if (!TIPC_SKB_CB(skb)->retr_cnt)
		return false;

	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
			msecs_to_jiffies(r->tolerance * 10)))
		return false;

	hdr = buf_msg(skb);
	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
		return false;

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, dest: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
	pr_info("retr_stamp %d, retr_cnt %d\n",
		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
		TIPC_SKB_CB(skb)->retr_cnt);

	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");

	if (link_is_bc_sndlink(l)) {
		r->state = LINK_RESET;
		*rc = TIPC_LINK_DOWN_EVT;
	} else {
		*rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	return true;
}

/* tipc_link_bc_retrans() - retransmit zero or more packets
 * @l: the link to transmit on
 * @r: the receiving link ordering the retransmit. Same as l if unicast
 * @from: retransmit from (inclusive) this sequence number
 * @to: retransmit to (inclusive) this sequence number
 * @xmitq: queue for accumulating the retransmitted packets
 */
static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
				u16 from, u16 to, struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	struct tipc_msg *hdr;
	int rc = 0;

	if (!skb)
		return 0;
	if (less(to, from))
		return 0;

	trace_tipc_link_retrans(r, from, to, &l->transmq);

	if (link_retransmit_failure(l, r, &rc))
		return rc;

	skb_queue_walk(&l->transmq, skb) {
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), from))
			continue;
		if (more(msg_seqno(hdr), to))
			break;

		if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
			continue;
		TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
		_skb = pskb_copy(skb, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;

		/* Increase actual retrans counter & mark first time */
		if (!TIPC_SKB_CB(skb)->retr_cnt++)
			TIPC_SKB_CB(skb)->retr_stamp = jiffies;
	}
	return 0;
}

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
	struct tipc_msg *hdr = buf_msg(skb);

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
			skb_queue_tail(mc_inputq, skb);
			return true;
		}
		/* fall through */
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case GROUP_PROTOCOL:
		skb_queue_tail(mc_inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return true;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq,
			   struct sk_buff **reasm_skb)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int pos = 0;

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}

	kfree_skb(skb);
	return 0;
}

/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1270 * inner message along with the ones in the old link's
1271 * deferdq
1272 * @l: tunnel link
1273 * @skb: TUNNEL_PROTOCOL message
1274 * @inputq: queue to put messages ready for delivery
1275 */
1276static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1277 struct sk_buff_head *inputq)
1278{
1279 struct sk_buff **reasm_skb = &l->failover_reasm_skb;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001280 struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001281 struct sk_buff_head *fdefq = &l->failover_deferdq;
1282 struct tipc_msg *hdr = buf_msg(skb);
1283 struct sk_buff *iskb;
1284 int ipos = 0;
1285 int rc = 0;
1286 u16 seqno;
1287
Tuong Lien2320bcd2019-07-24 08:56:12 +07001288 if (msg_type(hdr) == SYNCH_MSG) {
1289 kfree_skb(skb);
1290 return 0;
1291 }
Tuong Lien58ee86b2019-04-04 11:09:53 +07001292
Tuong Lien2320bcd2019-07-24 08:56:12 +07001293 /* Not a fragment? */
1294 if (likely(!msg_nof_fragms(hdr))) {
1295 if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
1296 pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
1297 skb_queue_len(fdefq));
1298 return 0;
1299 }
1300 kfree_skb(skb);
1301 } else {
1302 /* Set fragment type for buf_append */
1303 if (msg_fragm_no(hdr) == 1)
1304 msg_set_type(hdr, FIRST_FRAGMENT);
1305 else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
1306 msg_set_type(hdr, FRAGMENT);
1307 else
1308 msg_set_type(hdr, LAST_FRAGMENT);
1309
1310 if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
1311 /* Successful but non-complete reassembly? */
1312 if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
1313 return 0;
1314 pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
1315 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1316 }
1317 iskb = skb;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001318 }
1319
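	/* Deliver the extracted message plus any buffered ones from the
	 * failover deferdq, strictly in sequence relative to l->drop_point.
	 */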
1320 do {
1321 seqno = buf_seqno(iskb);
Tuong Lien58ee86b2019-04-04 11:09:53 +07001322 if (unlikely(less(seqno, l->drop_point))) {
1323 kfree_skb(iskb);
1324 continue;
1325 }
Tuong Lien58ee86b2019-04-04 11:09:53 +07001326 if (unlikely(seqno != l->drop_point)) {
1327 __tipc_skb_queue_sorted(fdefq, seqno, iskb);
1328 continue;
1329 }
1330
1331 l->drop_point++;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001332 if (!tipc_data_input(l, iskb, inputq))
1333 rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1334 if (unlikely(rc))
1335 break;
1336 } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1337
Tuong Lien58ee86b2019-04-04 11:09:53 +07001338 return rc;
1339}
1340
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001341static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1342{
1343 bool released = false;
1344 struct sk_buff *skb, *tmp;
1345
1346 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1347 if (more(buf_seqno(skb), acked))
1348 break;
1349 __skb_unlink(skb, &l->transmq);
1350 kfree_skb(skb);
1351 released = true;
1352 }
1353 return released;
1354}
1355
Tuong Lien91959482019-04-04 11:09:51 +07001356/* tipc_build_gap_ack_blks - build Gap ACK blocks
1357 * @l: tipc link on which data may have arrived with gaps in sequence
1358 * @data: data buffer where the built Gap ACK blocks are stored
1359 *
1360 * returns the actual length of the Gap ACK blocks written to @data
1361 */
1362static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data)
1363{
1364 struct sk_buff *skb = skb_peek(&l->deferdq);
1365 struct tipc_gap_ack_blks *ga = data;
1366 u16 len, expect, seqno = 0;
1367 u8 n = 0;
1368
1369 if (!skb)
1370 goto exit;
1371
1372 expect = buf_seqno(skb);
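	/* Walk the deferred queue; each jump in sequence numbers becomes one
	 * Gap ACK block: 'ack' is the last in-order seqno seen, 'gap' is the
	 * number of missing packets that follow it.
	 */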
1373 skb_queue_walk(&l->deferdq, skb) {
1374 seqno = buf_seqno(skb);
1375 if (unlikely(more(seqno, expect))) {
1376 ga->gacks[n].ack = htons(expect - 1);
1377 ga->gacks[n].gap = htons(seqno - expect);
1378 if (++n >= MAX_GAP_ACK_BLKS) {
1379 pr_info_ratelimited("Too few Gap ACK blocks!\n");
1380 goto exit;
1381 }
1382 } else if (unlikely(less(seqno, expect))) {
1383 pr_warn("Unexpected skb in deferdq!\n");
1384 continue;
1385 }
1386 expect = seqno + 1;
1387 }
1388
1389 /* last block */
1390 ga->gacks[n].ack = htons(seqno);
1391 ga->gacks[n].gap = 0;
1392 n++;
1393
1394exit:
1395 len = tipc_gap_ack_blks_sz(n);
1396 ga->len = htons(len);
1397 ga->gack_cnt = n;
1398 return len;
1399}
1400
1401/* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
1402 * acked packets, also doing retransmissions if
1403 * gaps found
1404 * @l: tipc link with transmq queue to be advanced
1405 * @acked: seqno of last packet acked by peer without any gaps before
1406 * @gap: # of gap packets
1407 * @ga: buffer pointer to Gap ACK blocks from peer
1408 * @xmitq: queue for accumulating the retransmitted packets if any
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001409 *
1410 * In case of repeated retransmit failures, the call returns early with an
1411 * event code (e.g. TIPC_LINK_DOWN_EVT)
Tuong Lien91959482019-04-04 11:09:51 +07001412 */
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001413static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
1414 struct tipc_gap_ack_blks *ga,
1415 struct sk_buff_head *xmitq)
Tuong Lien91959482019-04-04 11:09:51 +07001416{
1417 struct sk_buff *skb, *_skb, *tmp;
1418 struct tipc_msg *hdr;
1419 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1420 u16 ack = l->rcv_nxt - 1;
Tuong Lien71204232019-08-15 10:24:08 +07001421 bool passed = false;
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001422 u16 seqno, n = 0;
1423 int rc = 0;
1424
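	/* Walk transmq: release packets up to 'acked', retransmit those inside
	 * the reported gap, then move on to the next Gap ACK block, if any.
	 */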
Tuong Lien91959482019-04-04 11:09:51 +07001425 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1426 seqno = buf_seqno(skb);
1427
1428next_gap_ack:
1429 if (less_eq(seqno, acked)) {
1430 /* release skb */
1431 __skb_unlink(skb, &l->transmq);
1432 kfree_skb(skb);
1433 } else if (less_eq(seqno, acked + gap)) {
Tuong Lien71204232019-08-15 10:24:08 +07001434 /* First, check whether repeated retrans failures have occurred */
1435 if (!passed && link_retransmit_failure(l, l, &rc))
1436 return rc;
1437 passed = true;
1438
1439 /* retransmit skb if unrestricted */
Tuong Lien382f5982019-04-04 11:09:52 +07001440 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1441 continue;
1442 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
Tuong Lienfc1b6d62019-11-08 12:05:11 +07001443 _skb = pskb_copy(skb, GFP_ATOMIC);
Tuong Lien91959482019-04-04 11:09:51 +07001444 if (!_skb)
1445 continue;
1446 hdr = buf_msg(_skb);
1447 msg_set_ack(hdr, ack);
1448 msg_set_bcast_ack(hdr, bc_ack);
1449 _skb->priority = TC_PRIO_CONTROL;
1450 __skb_queue_tail(xmitq, _skb);
1451 l->stats.retransmitted++;
Tuong Lien71204232019-08-15 10:24:08 +07001452
1453 /* Increase actual retrans counter & mark first time */
1454 if (!TIPC_SKB_CB(skb)->retr_cnt++)
1455 TIPC_SKB_CB(skb)->retr_stamp = jiffies;
Tuong Lien91959482019-04-04 11:09:51 +07001456 } else {
1457 /* retry with Gap ACK blocks if any */
1458 if (!ga || n >= ga->gack_cnt)
1459 break;
1460 acked = ntohs(ga->gacks[n].ack);
1461 gap = ntohs(ga->gacks[n].gap);
1462 n++;
1463 goto next_gap_ack;
1464 }
1465 }
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001466
1467 return 0;
Tuong Lien91959482019-04-04 11:09:51 +07001468}
1469
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001470/* tipc_link_build_state_msg: prepare link state message for transmission
Jon Paul Maloy52666982015-10-22 08:51:41 -04001471 *
1472 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1473 * risk of ack storms towards the sender
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001474 */
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001475int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001476{
Jon Paul Maloy52666982015-10-22 08:51:41 -04001477 if (!l)
1478 return 0;
1479
1480 /* Broadcast ACK must be sent via a unicast link => defer to caller */
1481 if (link_is_bc_rcvlink(l)) {
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001482 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
Jon Paul Maloy52666982015-10-22 08:51:41 -04001483 return 0;
1484 l->rcv_unacked = 0;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001485
1486 /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1487 l->snd_nxt = l->rcv_nxt;
1488 return TIPC_LINK_SND_STATE;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001489 }
1490
1491 /* Unicast ACK */
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001492 l->rcv_unacked = 0;
1493 l->stats.sent_acks++;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001494 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001495 return 0;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001496}
1497
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001498/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1499 */
1500void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1501{
1502 int mtyp = RESET_MSG;
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001503 struct sk_buff *skb;
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001504
1505 if (l->state == LINK_ESTABLISHING)
1506 mtyp = ACTIVATE_MSG;
1507
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001508 tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001509
1510 /* Inform peer that this endpoint is going down if applicable */
1511 skb = skb_peek_tail(xmitq);
1512 if (skb && (l->state == LINK_RESET))
1513 msg_set_peer_stopping(buf_msg(skb), 1);
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001514}
1515
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001516/* tipc_link_build_nack_msg: prepare link nack message for transmission
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001517 * Note that sending of broadcast NACK is coordinated among nodes, to
1518 * reduce the risk of NACK storms towards the sender
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001519 */
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001520static int tipc_link_build_nack_msg(struct tipc_link *l,
1521 struct sk_buff_head *xmitq)
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001522{
1523 u32 def_cnt = ++l->stats.deferred_recv;
Tuong Lien382f5982019-04-04 11:09:52 +07001524 u32 defq_len = skb_queue_len(&l->deferdq);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001525 int match1, match2;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001526
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001527 if (link_is_bc_rcvlink(l)) {
1528 match1 = def_cnt & 0xf;
1529 match2 = tipc_own_addr(l->net) & 0xf;
1530 if (match1 == match2)
1531 return TIPC_LINK_SND_STATE;
1532 return 0;
1533 }
Jon Paul Maloy52666982015-10-22 08:51:41 -04001534
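	/* Rate-limit unicast NACKs: send a STATE message when the deferred
	 * queue first holds 3 packets, then for every 16 more.
	 */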
Tuong Lien382f5982019-04-04 11:09:52 +07001535 if (defq_len >= 3 && !((defq_len - 3) % 16))
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001536 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001537 return 0;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001538}
1539
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001540/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001541 * @l: the link that should handle the message
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001542 * @skb: TIPC packet
1543 * @xmitq: queue to place packets to be sent after this call
1544 */
1545int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1546 struct sk_buff_head *xmitq)
1547{
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001548 struct sk_buff_head *defq = &l->deferdq;
Tuong Lien382f5982019-04-04 11:09:52 +07001549 struct tipc_msg *hdr = buf_msg(skb);
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001550 u16 seqno, rcv_nxt, win_lim;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001551 int rc = 0;
1552
Tuong Lien382f5982019-04-04 11:09:52 +07001553 /* Verify and update link state */
1554 if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1555 return tipc_link_proto_rcv(l, skb, xmitq);
1556
1557 /* Don't send probe at next timeout expiration */
1558 l->silent_intv_cnt = 0;
1559
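	/* Process the arriving packet, then any in-sequence successors it
	 * releases from the deferred queue, one at a time.
	 */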
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001560 do {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001561 hdr = buf_msg(skb);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001562 seqno = msg_seqno(hdr);
1563 rcv_nxt = l->rcv_nxt;
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001564 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001565
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001566 if (unlikely(!link_is_up(l))) {
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001567 if (l->state == LINK_ESTABLISHING)
1568 rc = TIPC_LINK_UP_EVT;
1569 goto drop;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001570 }
1571
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001572 /* Drop if outside receive window */
1573 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1574 l->stats.duplicates++;
1575 goto drop;
1576 }
1577
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001578 /* Forward queues and wake up waiting users */
1579 if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1580 tipc_link_advance_backlog(l, xmitq);
1581 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1582 link_prepare_wakeup(l);
1583 }
1584
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001585 /* Defer delivery if sequence gap */
1586 if (unlikely(seqno != rcv_nxt)) {
Jon Paul Maloy8306f992015-10-15 14:52:43 -04001587 __tipc_skb_queue_sorted(defq, seqno, skb);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001588 rc |= tipc_link_build_nack_msg(l, xmitq);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001589 break;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001590 }
1591
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001592 /* Deliver packet */
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001593 l->rcv_nxt++;
Jon Paul Maloy95901122016-11-25 10:35:02 -05001594 l->stats.recv_pkts++;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001595
1596 if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1597 rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1598 else if (!tipc_data_input(l, skb, l->inputq))
1599 rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001600 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001601 rc |= tipc_link_build_state_msg(l, xmitq);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001602 if (unlikely(rc & ~TIPC_LINK_SND_STATE))
Jon Paul Maloy52666982015-10-22 08:51:41 -04001603 break;
Tuong Lien382f5982019-04-04 11:09:52 +07001604 } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001605
1606 return rc;
1607drop:
1608 kfree_skb(skb);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001609 return rc;
1610}
1611
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001612static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001613 bool probe_reply, u16 rcvgap,
1614 int tolerance, int priority,
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001615 struct sk_buff_head *xmitq)
1616{
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001617 struct tipc_link *bcl = l->bc_rcvlink;
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001618 struct sk_buff *skb;
1619 struct tipc_msg *hdr;
1620 struct sk_buff_head *dfq = &l->deferdq;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001621 bool node_up = link_is_up(bcl);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001622 struct tipc_mon_state *mstate = &l->mon_state;
1623 int dlen = 0;
1624 void *data;
Tuong Lien91959482019-04-04 11:09:51 +07001625 u16 glen = 0;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001626
1627 /* Don't send protocol message during reset or link failover */
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001628 if (tipc_link_is_blocked(l))
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001629 return;
1630
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001631 if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1632 return;
1633
1634 if (!skb_queue_empty(dfq))
1635 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1636
1637 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
Tuong Lien91959482019-04-04 11:09:51 +07001638 tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1639 l->addr, tipc_own_addr(l->net), 0, 0, 0);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001640 if (!skb)
1641 return;
1642
1643 hdr = buf_msg(skb);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001644 data = msg_data(hdr);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001645 msg_set_session(hdr, l->session);
1646 msg_set_bearer_id(hdr, l->bearer_id);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001647 msg_set_net_plane(hdr, l->net_plane);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001648 msg_set_next_sent(hdr, l->snd_nxt);
1649 msg_set_ack(hdr, l->rcv_nxt - 1);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001650 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
Jon Paul Maloy06bd2b12016-10-27 18:51:55 -04001651 msg_set_bc_ack_invalid(hdr, !node_up);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001652 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001653 msg_set_link_tolerance(hdr, tolerance);
1654 msg_set_linkprio(hdr, priority);
1655 msg_set_redundant_link(hdr, node_up);
1656 msg_set_seq_gap(hdr, 0);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001657 msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001658
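	/* STATE messages carry the sequence gap, broadcast gap, probe flags,
	 * optional Gap ACK blocks and monitoring data; RESET/ACTIVATE carry
	 * the bearer interface name instead.
	 */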
1659 if (mtyp == STATE_MSG) {
Jon Maloy9012de52018-07-10 01:07:35 +02001660 if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1661 msg_set_seqno(hdr, l->snd_nxt_state++);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001662 msg_set_seq_gap(hdr, rcvgap);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001663 msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001664 msg_set_probe(hdr, probe);
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001665 msg_set_is_keepalive(hdr, probe || probe_reply);
Tuong Lien91959482019-04-04 11:09:51 +07001666 if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1667 glen = tipc_build_gap_ack_blks(l, data);
1668 tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1669 msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1670 skb_trim(skb, INT_H_SIZE + glen + dlen);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001671 l->stats.sent_states++;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001672 l->rcv_unacked = 0;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001673 } else {
1674 /* RESET_MSG or ACTIVATE_MSG */
Tuong Lien91986ee2019-02-11 13:29:43 +07001675 if (mtyp == ACTIVATE_MSG) {
1676 msg_set_dest_session_valid(hdr, 1);
1677 msg_set_dest_session(hdr, l->peer_session);
1678 }
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001679 msg_set_max_pkt(hdr, l->advertised_mtu);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001680 strcpy(data, l->if_name);
1681 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1682 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001683 }
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001684 if (probe)
1685 l->stats.sent_probes++;
1686 if (rcvgap)
1687 l->stats.sent_nacks++;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001688 skb->priority = TC_PRIO_CONTROL;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001689 __skb_queue_tail(xmitq, skb);
Tuong Lien26574db2018-12-19 09:17:57 +07001690 trace_tipc_proto_build(skb, false, l->name);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001691}
Per Lidenb97bf3f2006-01-02 19:04:38 +01001692
LUU Duc Canhc140eb12018-09-26 21:00:54 +02001693void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1694 struct sk_buff_head *xmitq)
1695{
1696 u32 onode = tipc_own_addr(l->net);
1697 struct tipc_msg *hdr, *ihdr;
1698 struct sk_buff_head tnlq;
1699 struct sk_buff *skb;
1700 u32 dnode = l->addr;
1701
Jon Maloye654f9f2019-08-15 16:42:50 +02001702 __skb_queue_head_init(&tnlq);
LUU Duc Canhc140eb12018-09-26 21:00:54 +02001703 skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1704 INT_H_SIZE, BASIC_H_SIZE,
1705 dnode, onode, 0, 0, 0);
1706 if (!skb) {
1707 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1708 return;
1709 }
1710
1711 hdr = buf_msg(skb);
1712 msg_set_msgcnt(hdr, 1);
1713 msg_set_bearer_id(hdr, l->peer_bearer_id);
1714
1715 ihdr = (struct tipc_msg *)msg_data(hdr);
1716 tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1717 BASIC_H_SIZE, dnode);
1718 msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1719 __skb_queue_tail(&tnlq, skb);
1720 tipc_link_xmit(l, &tnlq, xmitq);
1721}
1722
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001723/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001724 * with contents of the link's transmit and backlog queues.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001725 */
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001726void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1727 int mtyp, struct sk_buff_head *xmitq)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001728{
Tuong Lien58ee86b2019-04-04 11:09:53 +07001729 struct sk_buff_head *fdefq = &tnl->failover_deferdq;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001730 struct sk_buff *skb, *tnlskb;
1731 struct tipc_msg *hdr, tnlhdr;
1732 struct sk_buff_head *queue = &l->transmq;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001733 struct sk_buff_head tmpxq, tnlq, frags;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001734 u16 pktlen, pktcnt, seqno = l->snd_nxt;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001735 bool pktcnt_need_update = false;
Tuong Lien4929a932019-07-24 08:56:11 +07001736 u16 syncpt;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001737 int rc;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001738
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001739 if (!tnl)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001740 return;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001741
Jon Maloye654f9f2019-08-15 16:42:50 +02001742 __skb_queue_head_init(&tnlq);
Tuong Lien4929a932019-07-24 08:56:11 +07001743 /* Link Synching:
1744 * From now on, send only a single ("dummy") SYNCH message
1745 * to peer. The SYNCH message does not contain any data, just
1746 * a header conveying the synch point to the peer.
1747 */
1748 if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1749 tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
1750 INT_H_SIZE, 0, l->addr,
1751 tipc_own_addr(l->net),
1752 0, 0, 0);
1753 if (!tnlskb) {
1754 pr_warn("%sunable to create dummy SYNCH_MSG\n",
1755 link_co_err);
1756 return;
1757 }
1758
1759 hdr = buf_msg(tnlskb);
1760 syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
1761 msg_set_syncpt(hdr, syncpt);
1762 msg_set_bearer_id(hdr, l->peer_bearer_id);
1763 __skb_queue_tail(&tnlq, tnlskb);
1764 tipc_link_xmit(tnl, &tnlq, xmitq);
1765 return;
1766 }
1767
Tuong Liend0d605c2019-11-06 18:12:17 +07001768 __skb_queue_head_init(&tmpxq);
1769 __skb_queue_head_init(&frags);
1770 /* At least one packet required for safe algorithm => add dummy */
1771 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1772 BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1773 0, 0, TIPC_ERR_NO_PORT);
1774 if (!skb) {
1775 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1776 return;
1777 }
1778 __skb_queue_tail(&tnlq, skb);
1779 tipc_link_xmit(l, &tnlq, &tmpxq);
1780 __skb_queue_purge(&tmpxq);
1781
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001782 /* Initialize reusable tunnel packet header */
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001783 tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001784 mtyp, INT_H_SIZE, l->addr);
Tuong Lien58ee86b2019-04-04 11:09:53 +07001785 if (mtyp == SYNCH_MSG)
1786 pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
1787 else
1788 pktcnt = skb_queue_len(&l->transmq);
1789 pktcnt += skb_queue_len(&l->backlogq);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001790 msg_set_msgcnt(&tnlhdr, pktcnt);
1791 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1792tnl:
1793 /* Wrap each packet into a tunnel packet */
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001794 skb_queue_walk(queue, skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001795 hdr = buf_msg(skb);
1796 if (queue == &l->backlogq)
1797 msg_set_seqno(hdr, seqno++);
1798 pktlen = msg_size(hdr);
Tuong Lien2320bcd2019-07-24 08:56:12 +07001799
1800 /* Tunnel link MTU is not large enough? This could be
1801 * due to:
1802 * 1) Link MTU has just changed or been set differently;
1803 * 2) A FAILOVER on top of a SYNCH message
1804 *
1805 * The 2nd case should not happen if peer supports
1806 * TIPC_TUNNEL_ENHANCED
1807 */
1808 if (pktlen > tnl->mtu - INT_H_SIZE) {
1809 if (mtyp == FAILOVER_MSG &&
1810 (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1811 rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
1812 &frags);
1813 if (rc) {
1814 pr_warn("%sunable to frag msg: rc %d\n",
1815 link_co_err, rc);
1816 return;
1817 }
1818 pktcnt += skb_queue_len(&frags) - 1;
1819 pktcnt_need_update = true;
1820 skb_queue_splice_tail_init(&frags, &tnlq);
1821 continue;
1822 }
1823 /* Unfortunately, peer doesn't support TIPC_TUNNEL_ENHANCED
1824 * => just warn and return!
1825 */
1826 pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
1827 link_co_err, msg_user(hdr),
1828 msg_type(hdr), msg_size(hdr));
1829 return;
1830 }
1831
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001832 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
Parthasarathy Bhuvaragan57d5f642017-01-13 15:46:25 +01001833 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001834 if (!tnlskb) {
1835 pr_warn("%sunable to send packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001836 return;
1837 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001838 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1839 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1840 __skb_queue_tail(&tnlq, tnlskb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001841 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001842 if (queue != &l->backlogq) {
1843 queue = &l->backlogq;
1844 goto tnl;
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -04001845 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001846
Tuong Lien2320bcd2019-07-24 08:56:12 +07001847 if (pktcnt_need_update)
1848 skb_queue_walk(&tnlq, skb) {
1849 hdr = buf_msg(skb);
1850 msg_set_msgcnt(hdr, pktcnt);
1851 }
1852
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001853 tipc_link_xmit(tnl, &tnlq, xmitq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001854
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001855 if (mtyp == FAILOVER_MSG) {
1856 tnl->drop_point = l->rcv_nxt;
1857 tnl->failover_reasm_skb = l->reasm_buf;
1858 l->reasm_buf = NULL;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001859
1860 /* Failover the link's deferdq */
1861 if (unlikely(!skb_queue_empty(fdefq))) {
1862 pr_warn("Link failover deferdq not empty: %d!\n",
1863 skb_queue_len(fdefq));
1864 __skb_queue_purge(fdefq);
1865 }
1866 skb_queue_splice_init(&l->deferdq, fdefq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001867 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001868}
1869
Tuong Lienc0b14a02019-05-02 17:23:23 +07001870/**
1871 * tipc_link_failover_prepare() - prepare tnl for link failover
1872 *
1873 * This is a special version of the precursor - tipc_link_tnl_prepare(),
1874 * see the tipc_node_link_failover() for details
1875 *
1876 * @l: failover link
1877 * @tnl: tunnel link
1878 * @xmitq: queue for messages to be xmited
1879 */
1880void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
1881 struct sk_buff_head *xmitq)
1882{
1883 struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1884
1885 tipc_link_create_dummy_tnl_msg(tnl, xmitq);
1886
Geert Uytterhoeven8ebed8a2019-10-24 17:30:43 +02001887 /* This failover link endpoint was never established before,
Tuong Lienc0b14a02019-05-02 17:23:23 +07001888 * so it has not received anything from peer.
1889 * Otherwise, it must be a normal failover situation or the
1890 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
1891 * would have to start over from scratch instead.
1892 */
Tuong Lienc0b14a02019-05-02 17:23:23 +07001893 tnl->drop_point = 1;
1894 tnl->failover_reasm_skb = NULL;
1895
1896 /* Initialize the link's failover deferdq */
1897 if (unlikely(!skb_queue_empty(fdefq))) {
1898 pr_warn("Link failover deferdq not empty: %d!\n",
1899 skb_queue_len(fdefq));
1900 __skb_queue_purge(fdefq);
1901 }
1902}
1903
Jon Maloy7ea817f2018-07-10 01:07:36 +02001904/* tipc_link_validate_msg(): validate message against current link state
1905 * Returns true if message should be accepted, otherwise false
1906 */
1907bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
1908{
1909 u16 curr_session = l->peer_session;
1910 u16 session = msg_session(hdr);
1911 int mtyp = msg_type(hdr);
1912
1913 if (msg_user(hdr) != LINK_PROTOCOL)
1914 return true;
1915
1916 switch (mtyp) {
1917 case RESET_MSG:
1918 if (!l->in_session)
1919 return true;
1920 /* Accept only RESET with new session number */
1921 return more(session, curr_session);
1922 case ACTIVATE_MSG:
1923 if (!l->in_session)
1924 return true;
1925 /* Accept only ACTIVATE with new or current session number */
1926 return !less(session, curr_session);
1927 case STATE_MSG:
1928 /* Accept only STATE with current session number */
1929 if (!l->in_session)
1930 return false;
1931 if (session != curr_session)
1932 return false;
LUU Duc Canhd949cfe2018-09-26 22:28:52 +02001933 /* Extra sanity check */
1934 if (!link_is_up(l) && msg_ack(hdr))
1935 return false;
Jon Maloy7ea817f2018-07-10 01:07:36 +02001936 if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
1937 return true;
1938 /* Accept only STATE with new sequence number */
1939 return !less(msg_seqno(hdr), l->rcv_nxt_state);
1940 default:
1941 return false;
1942 }
1943}
1944
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001945/* tipc_link_proto_rcv(): receive link level protocol message :
1946 * Note that network plane id propagates through the network, and may
1947 * change at any time. The node with lowest numerical id determines
1948 * network plane
1949 */
1950static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1951 struct sk_buff_head *xmitq)
1952{
1953 struct tipc_msg *hdr = buf_msg(skb);
Tuong Lien91959482019-04-04 11:09:51 +07001954 struct tipc_gap_ack_blks *ga = NULL;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001955 u16 rcvgap = 0;
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001956 u16 ack = msg_ack(hdr);
1957 u16 gap = msg_seq_gap(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001958 u16 peers_snd_nxt = msg_next_sent(hdr);
1959 u16 peers_tol = msg_link_tolerance(hdr);
1960 u16 peers_prio = msg_linkprio(hdr);
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001961 u16 rcv_nxt = l->rcv_nxt;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001962 u16 dlen = msg_data_sz(hdr);
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001963 int mtyp = msg_type(hdr);
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001964 bool reply = msg_probe(hdr);
Tuong Lien91959482019-04-04 11:09:51 +07001965 u16 glen = 0;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001966 void *data;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001967 char *if_name;
1968 int rc = 0;
1969
Tuong Lien26574db2018-12-19 09:17:57 +07001970 trace_tipc_proto_rcv(skb, false, l->name);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001971 if (tipc_link_is_blocked(l) || !xmitq)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001972 goto exit;
1973
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001974 if (tipc_own_addr(l->net) > msg_prevnode(hdr))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001975 l->net_plane = msg_net_plane(hdr);
1976
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001977 skb_linearize(skb);
1978 hdr = buf_msg(skb);
1979 data = msg_data(hdr);
1980
Tuong Lien26574db2018-12-19 09:17:57 +07001981 if (!tipc_link_validate_msg(l, hdr)) {
1982 trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
1983 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
Jon Maloy7ea817f2018-07-10 01:07:36 +02001984 goto exit;
Tuong Lien26574db2018-12-19 09:17:57 +07001985 }
Jon Maloy7ea817f2018-07-10 01:07:36 +02001986
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001987 switch (mtyp) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001988 case RESET_MSG:
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001989 case ACTIVATE_MSG:
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001990 /* Complete own link name with peer's interface name */
1991 if_name = strrchr(l->name, ':') + 1;
1992 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1993 break;
1994 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1995 break;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001996 strncpy(if_name, data, TIPC_MAX_IF_NAME);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001997
1998 /* Update own tolerance if peer indicates a non-zero value */
Jon Maloy047491e2018-10-10 17:34:01 +02001999 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002000 l->tolerance = peers_tol;
Jon Maloy047491e2018-10-10 17:34:01 +02002001 l->bc_rcvlink->tolerance = peers_tol;
2002 }
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002003 /* Update own priority if peer's priority is higher */
2004 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
2005 l->priority = peers_prio;
2006
Jon Maloy7ab412d2018-11-10 17:30:24 -05002007 /* If peer is going down we want full re-establish cycle */
2008 if (msg_peer_stopping(hdr)) {
Jon Paul Maloy634696b2016-04-15 13:33:03 -04002009 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
Jon Maloy7ab412d2018-11-10 17:30:24 -05002010 break;
2011 }
Tuong Lien91986ee2019-02-11 13:29:43 +07002012
2013 /* If this endpoint was re-created while peer was ESTABLISHING
2014 * it doesn't know current session number. Force re-synch.
2015 */
2016 if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
2017 l->session != msg_dest_session(hdr)) {
2018 if (less(l->session, msg_dest_session(hdr)))
2019 l->session = msg_dest_session(hdr) + 1;
2020 break;
2021 }
2022
Jon Maloy7ab412d2018-11-10 17:30:24 -05002023 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
2024 if (mtyp == RESET_MSG || !link_is_up(l))
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002025 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
2026
2027 /* ACTIVATE_MSG takes up link if it was already locally reset */
Jon Maloy7ab412d2018-11-10 17:30:24 -05002028 if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002029 rc = TIPC_LINK_UP_EVT;
2030
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002031 l->peer_session = msg_session(hdr);
Jon Maloy7ea817f2018-07-10 01:07:36 +02002032 l->in_session = true;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002033 l->peer_bearer_id = msg_bearer_id(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002034 if (l->mtu > msg_max_pkt(hdr))
2035 l->mtu = msg_max_pkt(hdr);
2036 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04002037
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002038 case STATE_MSG:
Jon Maloy9012de52018-07-10 01:07:35 +02002039 l->rcv_nxt_state = msg_seqno(hdr) + 1;
2040
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002041 /* Update own tolerance if peer indicates a non-zero value */
Jon Maloy047491e2018-10-10 17:34:01 +02002042 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002043 l->tolerance = peers_tol;
Jon Maloy047491e2018-10-10 17:34:01 +02002044 l->bc_rcvlink->tolerance = peers_tol;
2045 }
Jon Paul Maloyf7967552016-11-23 21:05:26 -05002046 /* Update own prio if peer indicates a different value */
2047 if ((peers_prio != l->priority) &&
2048 in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
Richard Alpe81729812016-02-01 08:19:57 +01002049 l->priority = peers_prio;
2050 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2051 }
2052
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002053 l->silent_intv_cnt = 0;
2054 l->stats.recv_states++;
2055 if (msg_probe(hdr))
2056 l->stats.recv_probes++;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002057
2058 if (!link_is_up(l)) {
2059 if (l->state == LINK_ESTABLISHING)
2060 rc = TIPC_LINK_UP_EVT;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002061 break;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002062 }
Tuong Lien91959482019-04-04 11:09:51 +07002063
2064 /* Receive Gap ACK blocks from peer if any */
2065 if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
2066 ga = (struct tipc_gap_ack_blks *)data;
2067 glen = ntohs(ga->len);
2068 /* sanity check: if failed, ignore Gap ACK blocks */
2069 if (glen != tipc_gap_ack_blks_sz(ga->gack_cnt))
2070 ga = NULL;
2071 }
2072
2073 tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04002074 &l->mon_state, l->bearer_id);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002075
Jon Paul Maloy662921c2015-07-30 18:24:21 -04002076 /* Send NACK if peer has sent pkts we haven't received yet */
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04002077 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002078 rcvgap = peers_snd_nxt - l->rcv_nxt;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01002079 if (rcvgap || reply)
2080 tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
2081 rcvgap, 0, 0, xmitq);
Tuong Lien91959482019-04-04 11:09:51 +07002082
Tuong Lien6a6b5c82019-06-17 12:15:42 +07002083 rc |= tipc_link_advance_transmq(l, ack, gap, ga, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002084
2085 /* If NACK, retransmit will now start at right position */
Tuong Lien91959482019-04-04 11:09:51 +07002086 if (gap)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002087 l->stats.recv_nacks++;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04002088
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002089 tipc_link_advance_backlog(l, xmitq);
2090 if (unlikely(!skb_queue_empty(&l->wakeupq)))
2091 link_prepare_wakeup(l);
2092 }
2093exit:
2094 kfree_skb(skb);
2095 return rc;
2096}
2097
Jon Paul Maloy52666982015-10-22 08:51:41 -04002098/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
2099 */
2100static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
2101 u16 peers_snd_nxt,
2102 struct sk_buff_head *xmitq)
2103{
2104 struct sk_buff *skb;
2105 struct tipc_msg *hdr;
2106 struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
2107 u16 ack = l->rcv_nxt - 1;
2108 u16 gap_to = peers_snd_nxt - 1;
2109
2110 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05002111 0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002112 if (!skb)
2113 return false;
2114 hdr = buf_msg(skb);
2115 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
2116 msg_set_bcast_ack(hdr, ack);
2117 msg_set_bcgap_after(hdr, ack);
2118 if (dfrd_skb)
2119 gap_to = buf_seqno(dfrd_skb) - 1;
2120 msg_set_bcgap_to(hdr, gap_to);
2121 msg_set_non_seq(hdr, bcast);
2122 __skb_queue_tail(xmitq, skb);
2123 return true;
2124}
2125
2126/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
2127 *
2128 * Give a newly added peer node the sequence number where it should
2129 * start receiving and acking broadcast packets.
2130 */
Wu Fengguang742e0382015-10-24 22:56:01 +08002131static void tipc_link_build_bc_init_msg(struct tipc_link *l,
2132 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04002133{
2134 struct sk_buff_head list;
2135
2136 __skb_queue_head_init(&list);
2137 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2138 return;
Jon Paul Maloy06bd2b12016-10-27 18:51:55 -04002139 msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002140 tipc_link_xmit(l, &list, xmitq);
2141}
2142
2143/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2144 */
2145void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2146{
2147 int mtyp = msg_type(hdr);
2148 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2149
2150 if (link_is_up(l))
2151 return;
2152
2153 if (msg_user(hdr) == BCAST_PROTOCOL) {
2154 l->rcv_nxt = peers_snd_nxt;
2155 l->state = LINK_ESTABLISHED;
2156 return;
2157 }
2158
2159 if (l->peer_caps & TIPC_BCAST_SYNCH)
2160 return;
2161
2162 if (msg_peer_node_is_up(hdr))
2163 return;
2164
2165 /* Compatibility: accept older, less safe initial synch data */
2166 if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2167 l->rcv_nxt = peers_snd_nxt;
2168}
2169
2170/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2171 */
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002172int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2173 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04002174{
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -04002175 struct tipc_link *snd_l = l->bc_sndlink;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002176 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002177 u16 from = msg_bcast_ack(hdr) + 1;
2178 u16 to = from + msg_bc_gap(hdr) - 1;
2179 int rc = 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002180
2181 if (!link_is_up(l))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002182 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002183
2184 if (!msg_peer_node_is_up(hdr))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002185 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002186
Jon Paul Maloy2d18ac42016-07-11 16:08:35 -04002187 /* Open when peer acknowledges our bcast init msg (pkt #1) */
2188 if (msg_ack(hdr))
2189 l->bc_peer_is_up = true;
2190
2191 if (!l->bc_peer_is_up)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002192 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002193
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -04002194 l->stats.recv_nacks++;
2195
Jon Paul Maloy52666982015-10-22 08:51:41 -04002196 /* Ignore if peers_snd_nxt goes beyond receive window */
2197 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002198 return rc;
2199
Tuong Lien6a6b5c82019-06-17 12:15:42 +07002200 rc = tipc_link_bc_retrans(snd_l, l, from, to, xmitq);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002201
2202 l->snd_nxt = peers_snd_nxt;
2203 if (link_bc_rcv_gap(l))
2204 rc |= TIPC_LINK_SND_STATE;
2205
2206 /* Return now if sender supports nack via STATE messages */
2207 if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2208 return rc;
2209
2210 /* Otherwise, be backwards compatible */
Jon Paul Maloy52666982015-10-22 08:51:41 -04002211
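	/* Throttle legacy broadcast NACK sending via the nack_state
	 * machine below.
	 */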
2212 if (!more(peers_snd_nxt, l->rcv_nxt)) {
2213 l->nack_state = BC_NACK_SND_CONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002214 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002215 }
2216
2217 /* Don't NACK if one was recently sent or peeked */
2218 if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2219 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002220 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002221 }
2222
2223 /* Conditionally delay NACK sending until next synch rcv */
2224 if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2225 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2226 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002227 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002228 }
2229
2230 /* Send NACK now but suppress next one */
2231 tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2232 l->nack_state = BC_NACK_SND_SUPPRESS;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002233 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002234}
2235
2236void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
2237 struct sk_buff_head *xmitq)
2238{
2239 struct sk_buff *skb, *tmp;
2240 struct tipc_link *snd_l = l->bc_sndlink;
2241
2242 if (!link_is_up(l) || !l->bc_peer_is_up)
2243 return;
2244
2245 if (!more(acked, l->acked))
2246 return;
2247
Tuong Lien26574db2018-12-19 09:17:57 +07002248 trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002249 /* Skip over packets peer has already acked */
2250 skb_queue_walk(&snd_l->transmq, skb) {
2251 if (more(buf_seqno(skb), l->acked))
2252 break;
2253 }
2254
2255 /* Update/release the packets peer is acking now */
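	/* A buffer is freed only when its 'ackers' count, i.e. the number of
	 * peers that still need to acknowledge it, drops to zero.
	 */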
2256 skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
2257 if (more(buf_seqno(skb), acked))
2258 break;
2259 if (!--TIPC_SKB_CB(skb)->ackers) {
2260 __skb_unlink(skb, &snd_l->transmq);
2261 kfree_skb(skb);
2262 }
2263 }
2264 l->acked = acked;
2265 tipc_link_advance_backlog(snd_l, xmitq);
2266 if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
2267 link_prepare_wakeup(snd_l);
2268}
2269
2270/* tipc_link_bc_nack_rcv(): receive broadcast nack message
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002271 * This function is here for backwards compatibility, since
2272 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
Jon Paul Maloy52666982015-10-22 08:51:41 -04002273 */
2274int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2275 struct sk_buff_head *xmitq)
2276{
2277 struct tipc_msg *hdr = buf_msg(skb);
2278 u32 dnode = msg_destnode(hdr);
2279 int mtyp = msg_type(hdr);
2280 u16 acked = msg_bcast_ack(hdr);
2281 u16 from = acked + 1;
2282 u16 to = msg_bcgap_to(hdr);
2283 u16 peers_snd_nxt = to + 1;
2284 int rc = 0;
2285
2286 kfree_skb(skb);
2287
2288 if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2289 return 0;
2290
2291 if (mtyp != STATE_MSG)
2292 return 0;
2293
Jon Paul Maloye74a3862016-03-03 14:23:21 -05002294 if (dnode == tipc_own_addr(l->net)) {
Jon Paul Maloy52666982015-10-22 08:51:41 -04002295 tipc_link_bc_ack_rcv(l, acked, xmitq);
Tuong Lien6a6b5c82019-06-17 12:15:42 +07002296 rc = tipc_link_bc_retrans(l->bc_sndlink, l, from, to, xmitq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002297 l->stats.recv_nacks++;
2298 return rc;
2299 }
2300
2301 /* Msg for other node => suppress own NACK at next sync if applicable */
2302 if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2303 l->nack_state = BC_NACK_SND_SUPPRESS;
2304
2305 return 0;
2306}
2307
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04002308void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002309{
Jon Maloy218527f2018-03-29 23:20:41 +02002310 int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04002311
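	/* Backlog limits scale with the configured window per importance
	 * level; the system level is capped by the number of packets needed
	 * for a full bulk name-table distribution.
	 */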
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04002312 l->window = win;
Jon Paul Maloy5a0950c2016-08-16 11:53:51 -04002313 l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win);
2314 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2);
2315 l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3);
2316 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04002317 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002318}
2319
Allan Stephens5c216e12011-10-18 11:34:29 -04002320/**
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002321 * link_reset_stats - reset link statistics
Jon Paul Maloy1a906322015-11-19 14:30:47 -05002322 * @l: pointer to link
Per Lidenb97bf3f2006-01-02 19:04:38 +01002323 */
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002324void tipc_link_reset_stats(struct tipc_link *l)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002325{
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002326 memset(&l->stats, 0, sizeof(l->stats));
Per Lidenb97bf3f2006-01-02 19:04:38 +01002327}
2328
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002329static void link_print(struct tipc_link *l, const char *str)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002330{
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002331 struct sk_buff *hskb = skb_peek(&l->transmq);
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04002332 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002333 u16 tail = l->snd_nxt - 1;
Ying Xue7a2f7d12014-04-21 10:55:46 +08002334
Jon Paul Maloy662921c2015-07-30 18:24:21 -04002335 pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002336 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2337 skb_queue_len(&l->transmq), head, tail,
2338 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002339}
Richard Alpe0655f6a2014-11-20 10:29:07 +01002340
2341/* Parse and validate nested (link) properties valid for media, bearer and link
2342 */
2343int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2344{
2345 int err;
2346
Johannes Berg8cb08172019-04-26 14:07:28 +02002347 err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2348 tipc_nl_prop_policy, NULL);
Richard Alpe0655f6a2014-11-20 10:29:07 +01002349 if (err)
2350 return err;
2351
2352 if (props[TIPC_NLA_PROP_PRIO]) {
2353 u32 prio;
2354
2355 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2356 if (prio > TIPC_MAX_LINK_PRI)
2357 return -EINVAL;
2358 }
2359
2360 if (props[TIPC_NLA_PROP_TOL]) {
2361 u32 tol;
2362
2363 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2364 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2365 return -EINVAL;
2366 }
2367
2368 if (props[TIPC_NLA_PROP_WIN]) {
2369 u32 win;
2370
2371 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2372 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
2373 return -EINVAL;
2374 }
2375
2376 return 0;
2377}
Richard Alpe7be57fc2014-11-20 10:29:12 +01002378
Richard Alped8182802014-11-24 11:10:29 +01002379static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002380{
2381 int i;
2382 struct nlattr *stats;
2383
2384 struct nla_map {
2385 u32 key;
2386 u32 val;
2387 };
2388
2389 struct nla_map map[] = {
Jon Paul Maloy95901122016-11-25 10:35:02 -05002390 {TIPC_NLA_STATS_RX_INFO, 0},
Richard Alpe7be57fc2014-11-20 10:29:12 +01002391 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2392 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2393 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2394 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
Jon Paul Maloy95901122016-11-25 10:35:02 -05002395 {TIPC_NLA_STATS_TX_INFO, 0},
Richard Alpe7be57fc2014-11-20 10:29:12 +01002396 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2397 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2398 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2399 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2400 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2401 s->msg_length_counts : 1},
2402 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2403 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2404 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2405 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2406 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2407 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2408 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2409 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2410 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2411 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
2412 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2413 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2414 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2415 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
2416 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2417 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2418 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2419 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2420 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2421 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2422 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2423 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2424 (s->accu_queue_sz / s->queue_sz_counts) : 0}
2425 };
2426
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002427 stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002428 if (!stats)
2429 return -EMSGSIZE;
2430
2431 for (i = 0; i < ARRAY_SIZE(map); i++)
2432 if (nla_put_u32(skb, map[i].key, map[i].val))
2433 goto msg_full;
2434
2435 nla_nest_end(skb, stats);
2436
2437 return 0;
2438msg_full:
2439 nla_nest_cancel(skb, stats);
2440
2441 return -EMSGSIZE;
2442}
2443
2444/* Caller should hold appropriate locks to protect the link */
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05002445int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2446 struct tipc_link *link, int nlflags)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002447{
Jon Maloy23fd3ea2018-03-22 20:42:49 +01002448 u32 self = tipc_own_addr(net);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002449 struct nlattr *attrs;
2450 struct nlattr *prop;
Jon Maloy23fd3ea2018-03-22 20:42:49 +01002451 void *hdr;
2452 int err;
Richard Alpe7be57fc2014-11-20 10:29:12 +01002453
Richard Alpebfb3e5d2015-02-09 09:50:03 +01002454 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
Nicolas Dichtelf2f67392015-04-28 18:33:50 +02002455 nlflags, TIPC_NL_LINK_GET);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002456 if (!hdr)
2457 return -EMSGSIZE;
2458
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002459 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002460 if (!attrs)
2461 goto msg_full;
2462
2463 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2464 goto attr_msg_full;
Jon Maloy23fd3ea2018-03-22 20:42:49 +01002465 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002466 goto attr_msg_full;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04002467 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002468 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002469 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002470 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002471 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002472 goto attr_msg_full;
2473
2474 if (tipc_link_is_up(link))
2475 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2476 goto attr_msg_full;
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04002477 if (link->active)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002478 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2479 goto attr_msg_full;
2480
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002481 prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002482 if (!prop)
2483 goto attr_msg_full;
2484 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2485 goto prop_msg_full;
2486 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2487 goto prop_msg_full;
2488 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04002489 link->window))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002490 goto prop_msg_full;
2493 nla_nest_end(msg->skb, prop);
2494
2495 err = __tipc_nl_add_stats(msg->skb, &link->stats);
2496 if (err)
2497 goto attr_msg_full;
2498
2499 nla_nest_end(msg->skb, attrs);
2500 genlmsg_end(msg->skb, hdr);
2501
2502 return 0;
2503
2504prop_msg_full:
2505 nla_nest_cancel(msg->skb, prop);
2506attr_msg_full:
2507 nla_nest_cancel(msg->skb, attrs);
2508msg_full:
2509 genlmsg_cancel(msg->skb, hdr);
2510
2511 return -EMSGSIZE;
2512}
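
/*
 * Example (illustrative userspace sketch, not part of this kernel file):
 * __tipc_nl_add_link() fills one TIPC_NLA_LINK nest per link in reply to a
 * TIPC_NL_LINK_GET dump. The libnl-3 calls below and the generic netlink
 * family name "TIPCv2" are assumptions based on standard libnl usage and
 * the TIPC netlink uapi, not taken from this file.
 */
#include <errno.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/tipc_netlink.h>

static int dump_tipc_links(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg;
	int family, err;

	if (!sk)
		return -ENOMEM;
	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "TIPCv2");

	msg = nlmsg_alloc();
	if (!msg) {
		nl_socket_free(sk);
		return -ENOMEM;
	}
	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0,
		    NLM_F_DUMP, TIPC_NL_LINK_GET, 0);
	err = nl_send_auto(sk, msg);
	if (err >= 0)
		err = nl_recvmsgs_default(sk); /* replies built by the fill
						* routines in this file */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return err;
}
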
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002513
2514static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2515 struct tipc_stats *stats)
2516{
2517 int i;
2518 struct nlattr *nest;
2519
2520 struct nla_map {
2521 __u32 key;
2522 __u32 val;
2523 };
2524
2525 struct nla_map map[] = {
Jon Paul Maloy95901122016-11-25 10:35:02 -05002526 {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002527 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2528 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2529 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2530 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
Jon Paul Maloy95901122016-11-25 10:35:02 -05002531 {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002532 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2533 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2534 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2535 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2536 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2537 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2538 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2539 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2540 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2541 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2542 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2543 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2544 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2545 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2546 };
2547
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002548 nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002549 if (!nest)
2550 return -EMSGSIZE;
2551
2552 for (i = 0; i < ARRAY_SIZE(map); i++)
2553 if (nla_put_u32(skb, map[i].key, map[i].val))
2554 goto msg_full;
2555
2556 nla_nest_end(skb, nest);
2557
2558 return 0;
2559msg_full:
2560 nla_nest_cancel(skb, nest);
2561
2562 return -EMSGSIZE;
2563}
2564
2565int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2566{
2567 int err;
2568 void *hdr;
2569 struct nlattr *attrs;
2570 struct nlattr *prop;
2571 struct tipc_net *tn = net_generic(net, tipc_net_id);
Hoang Le02ec6ca2019-03-19 18:49:48 +07002572 u32 bc_mode = tipc_bcast_get_broadcast_mode(net);
2573 u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002574 struct tipc_link *bcl = tn->bcl;
2575
2576 if (!bcl)
2577 return 0;
2578
2579 tipc_bcast_lock(net);
2580
2581 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2582 NLM_F_MULTI, TIPC_NL_LINK_GET);
Insu Yunb53ce3e2016-02-17 11:47:35 -05002583 if (!hdr) {
2584 tipc_bcast_unlock(net);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002585 return -EMSGSIZE;
Insu Yunb53ce3e2016-02-17 11:47:35 -05002586 }
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002587
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002588 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002589 if (!attrs)
2590 goto msg_full;
2591
2592 /* The broadcast link is always up */
2593 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2594 goto attr_msg_full;
2595
2596 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2597 goto attr_msg_full;
2598 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2599 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002600 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002601 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002602 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002603 goto attr_msg_full;
2604
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002605 prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002606 if (!prop)
2607 goto attr_msg_full;
2608 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2609 goto prop_msg_full;
Hoang Le02ec6ca2019-03-19 18:49:48 +07002610 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2611 goto prop_msg_full;
2612 if (bc_mode & BCLINK_MODE_SEL)
2613 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2614 bc_ratio))
2615 goto prop_msg_full;
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002616 nla_nest_end(msg->skb, prop);
2617
2618 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2619 if (err)
2620 goto attr_msg_full;
2621
2622 tipc_bcast_unlock(net);
2623 nla_nest_end(msg->skb, attrs);
2624 genlmsg_end(msg->skb, hdr);
2625
2626 return 0;
2627
2628prop_msg_full:
2629 nla_nest_cancel(msg->skb, prop);
2630attr_msg_full:
2631 nla_nest_cancel(msg->skb, attrs);
2632msg_full:
2633 tipc_bcast_unlock(net);
2634 genlmsg_cancel(msg->skb, hdr);
2635
2636 return -EMSGSIZE;
2637}
2638
Richard Alped01332f2016-02-01 08:19:56 +01002639void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2640 struct sk_buff_head *xmitq)
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002641{
2642 l->tolerance = tol;
Jon Maloy047491e2018-10-10 17:34:01 +02002643 if (l->bc_rcvlink)
2644 l->bc_rcvlink->tolerance = tol;
Jon Maloy37c64cf2018-02-14 13:34:39 +01002645 if (link_is_up(l))
2646 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002647}
2648
Richard Alped01332f2016-02-01 08:19:56 +01002649void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2650 struct sk_buff_head *xmitq)
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002651{
2652 l->priority = prio;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01002653 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002654}
2655
2656void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2657{
2658 l->abort_limit = limit;
2659}
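
/*
 * Example (illustrative sketch, not part of this file): the three setters
 * above only update link state; tipc_link_set_tolerance() and
 * tipc_link_set_prio() additionally queue a STATE_MSG on the caller-supplied
 * 'xmitq' but transmit nothing themselves. The function name below is
 * hypothetical, and the locking and bearer hand-off are indicated only as
 * comments because they are the caller's responsibility.
 */
static void example_update_link_tolerance(struct tipc_link *l, u32 new_tol)
{
	struct sk_buff_head xmitq;

	__skb_queue_head_init(&xmitq);

	/* assumed: the lock protecting 'l' is already held by the caller */
	tipc_link_set_tolerance(l, new_tol, &xmitq);

	/* assumed: the caller then hands 'xmitq' to the owning bearer,
	 * e.g. via tipc_bearer_xmit(), so the queued STATE_MSG reaches
	 * the peer
	 */
}
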
Tuong Lienb4b97712018-12-19 09:17:56 +07002660
2661char *tipc_link_name_ext(struct tipc_link *l, char *buf)
2662{
2663 if (!l)
2664 scnprintf(buf, TIPC_MAX_LINK_NAME, "null");
2665 else if (link_is_bc_sndlink(l))
2666 scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender");
2667 else if (link_is_bc_rcvlink(l))
2668 scnprintf(buf, TIPC_MAX_LINK_NAME,
2669 "broadcast-receiver, peer %x", l->addr);
2670 else
2671 memcpy(buf, l->name, TIPC_MAX_LINK_NAME);
2672
2673 return buf;
2674}
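
/*
 * Example (illustrative sketch, not part of this file): tipc_link_name_ext()
 * always writes into the caller's buffer and returns it, so it can be used
 * directly inside a log statement. The function name below is hypothetical.
 */
static void example_log_link_name(struct tipc_link *l)
{
	char lname[TIPC_MAX_LINK_NAME];

	pr_info("link %s changed state\n", tipc_link_name_ext(l, lname));
}
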
2675
2676/**
2677 * tipc_link_dump - dump TIPC link data
2678 * @l: tipc link to be dumped
2679 * @dqueues: bitmask selecting which link queues to dump:
2680 * - TIPC_DUMP_NONE: don't dump link queues
2681 * - TIPC_DUMP_TRANSMQ: dump link transmq queue
2682 * - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2683 * - TIPC_DUMP_DEFERDQ: dump link deferred queue
2684 * - TIPC_DUMP_INPUTQ: dump link input queue
2685 * - TIPC_DUMP_WAKEUP: dump link wakeup queue
2686 * - TIPC_DUMP_ALL: dump all the link queues above
2687 * @buf: buffer into which the formatted dump data is written
2688 */
2689int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2690{
2691 int i = 0;
2692 size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2693 struct sk_buff_head *list;
2694 struct sk_buff *hskb, *tskb;
2695 u32 len;
2696
2697 if (!l) {
2698 i += scnprintf(buf, sz, "link data: (null)\n");
2699 return i;
2700 }
2701
2702 i += scnprintf(buf, sz, "link data: %x", l->addr);
2703 i += scnprintf(buf + i, sz - i, " %x", l->state);
2704 i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2705 i += scnprintf(buf + i, sz - i, " %u", l->session);
2706 i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2707 i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2708 i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2709 i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2710 i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2711 i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2712 i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2713 i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
Tuong Lien71204232019-08-15 10:24:08 +07002714 i += scnprintf(buf + i, sz - i, " %u", 0);
Jon Maloy77cf8ed2019-06-25 17:36:43 +02002715 i += scnprintf(buf + i, sz - i, " %u", 0);
Tuong Lienb4b97712018-12-19 09:17:56 +07002716 i += scnprintf(buf + i, sz - i, " %u", l->acked);
2717
2718 list = &l->transmq;
2719 len = skb_queue_len(list);
2720 hskb = skb_peek(list);
2721 tskb = skb_peek_tail(list);
2722 i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2723 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2724 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2725
2726 list = &l->deferdq;
2727 len = skb_queue_len(list);
2728 hskb = skb_peek(list);
2729 tskb = skb_peek_tail(list);
2730 i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2731 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2732 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2733
2734 list = &l->backlogq;
2735 len = skb_queue_len(list);
2736 hskb = skb_peek(list);
2737 tskb = skb_peek_tail(list);
2738 i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2739 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2740 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2741
2742 list = l->inputq;
2743 len = skb_queue_len(list);
2744 hskb = skb_peek(list);
2745 tskb = skb_peek_tail(list);
2746 i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2747 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2748 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2749
2750 if (dqueues & TIPC_DUMP_TRANSMQ) {
2751 i += scnprintf(buf + i, sz - i, "transmq: ");
2752 i += tipc_list_dump(&l->transmq, false, buf + i);
2753 }
2754 if (dqueues & TIPC_DUMP_BACKLOGQ) {
2755 i += scnprintf(buf + i, sz - i,
2756 "backlogq: <%u %u %u %u %u>, ",
2757 l->backlog[TIPC_LOW_IMPORTANCE].len,
2758 l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2759 l->backlog[TIPC_HIGH_IMPORTANCE].len,
2760 l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2761 l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2762 i += tipc_list_dump(&l->backlogq, false, buf + i);
2763 }
2764 if (dqueues & TIPC_DUMP_DEFERDQ) {
2765 i += scnprintf(buf + i, sz - i, "deferdq: ");
2766 i += tipc_list_dump(&l->deferdq, false, buf + i);
2767 }
2768 if (dqueues & TIPC_DUMP_INPUTQ) {
2769 i += scnprintf(buf + i, sz - i, "inputq: ");
2770 i += tipc_list_dump(l->inputq, false, buf + i);
2771 }
2772 if (dqueues & TIPC_DUMP_WAKEUP) {
2773 i += scnprintf(buf + i, sz - i, "wakeup: ");
2774 i += tipc_list_dump(&l->wakeupq, false, buf + i);
2775 }
2776
2777 return i;
2778}
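
/*
 * Example (illustrative sketch, not part of this file): dumping a link and
 * all of its queues. LINK_LMAX is the larger of the two buffer sizes
 * tipc_link_dump() expects; the function name and the GFP_ATOMIC allocation
 * context below are assumptions.
 */
static void example_dump_link(struct tipc_link *l)
{
	char *dump = kmalloc(LINK_LMAX, GFP_ATOMIC);

	if (!dump)
		return;
	tipc_link_dump(l, TIPC_DUMP_ALL, dump);
	pr_info("%s", dump);
	kfree(dump);
}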