blob: 289e848084ac23810091639365c0bacd58dbb50e [file] [log] [blame]
Per Lidenb97bf3f2006-01-02 19:04:38 +01001/*
2 * net/tipc/link.c: TIPC link code
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003 *
Jon Paul Maloye74a3862016-03-03 14:23:21 -05004 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
Ying Xue198d73b2013-06-17 10:54:42 -04005 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
Per Lidenb97bf3f2006-01-02 19:04:38 +01006 * All rights reserved.
7 *
Per Liden9ea1fd32006-01-11 13:30:43 +01008 * Redistribution and use in source and binary forms, with or without
Per Lidenb97bf3f2006-01-02 19:04:38 +01009 * modification, are permitted provided that the following conditions are met:
10 *
Per Liden9ea1fd32006-01-11 13:30:43 +010011 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
Per Lidenb97bf3f2006-01-02 19:04:38 +010019 *
Per Liden9ea1fd32006-01-11 13:30:43 +010020 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
Per Lidenb97bf3f2006-01-02 19:04:38 +010034 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -040038#include "subscr.h"
Per Lidenb97bf3f2006-01-02 19:04:38 +010039#include "link.h"
Richard Alpe7be57fc2014-11-20 10:29:12 +010040#include "bcast.h"
Jon Paul Maloy9816f062014-05-14 05:39:15 -040041#include "socket.h"
Per Lidenb97bf3f2006-01-02 19:04:38 +010042#include "name_distr.h"
Per Lidenb97bf3f2006-01-02 19:04:38 +010043#include "discover.h"
Richard Alpe0655f6a2014-11-20 10:29:07 +010044#include "netlink.h"
Jon Paul Maloy35c55c92016-06-13 20:46:22 -040045#include "monitor.h"
Tuong Lienb4b97712018-12-19 09:17:56 +070046#include "trace.h"
Per Lidenb97bf3f2006-01-02 19:04:38 +010047
Ying Xue796c75d2013-06-17 10:54:48 -040048#include <linux/pkt_sched.h>
49
Jon Paul Maloy38206d52015-11-19 14:30:46 -050050struct tipc_stats {
Jon Paul Maloy95901122016-11-25 10:35:02 -050051 u32 sent_pkts;
52 u32 recv_pkts;
Jon Paul Maloy38206d52015-11-19 14:30:46 -050053 u32 sent_states;
54 u32 recv_states;
55 u32 sent_probes;
56 u32 recv_probes;
57 u32 sent_nacks;
58 u32 recv_nacks;
59 u32 sent_acks;
60 u32 sent_bundled;
61 u32 sent_bundles;
62 u32 recv_bundled;
63 u32 recv_bundles;
64 u32 retransmitted;
65 u32 sent_fragmented;
66 u32 sent_fragments;
67 u32 recv_fragmented;
68 u32 recv_fragments;
69 u32 link_congs; /* # port sends blocked by congestion */
70 u32 deferred_recv;
71 u32 duplicates;
72 u32 max_queue_sz; /* send queue size high water mark */
73 u32 accu_queue_sz; /* used for send queue size profiling */
74 u32 queue_sz_counts; /* used for send queue size profiling */
75 u32 msg_length_counts; /* used for message length profiling */
76 u32 msg_lengths_total; /* used for message length profiling */
77 u32 msg_length_profile[7]; /* used for msg. length profiling */
78};
79
80/**
81 * struct tipc_link - TIPC link data structure
82 * @addr: network address of link's peer node
83 * @name: link name character string
84 * @media_addr: media address to use when sending messages over link
85 * @timer: link timer
86 * @net: pointer to namespace struct
87 * @refcnt: reference counter for permanent references (owner node & timer)
88 * @peer_session: link session # being used by peer end of link
89 * @peer_bearer_id: bearer id used by link's peer endpoint
90 * @bearer_id: local bearer id used by link
91 * @tolerance: minimum link continuity loss needed to reset link [in ms]
Jon Paul Maloy38206d52015-11-19 14:30:46 -050092 * @abort_limit: # of unacknowledged continuity probes needed to reset link
93 * @state: current state of link FSM
94 * @peer_caps: bitmap describing capabilities of peer node
95 * @silent_intv_cnt: # of timer intervals without any reception from peer
96 * @proto_msg: template for control messages generated by link
97 * @pmsg: convenience pointer to "proto_msg" field
98 * @priority: current link priority
99 * @net_plane: current link network plane ('A' through 'H')
Jon Paul Maloy35c55c92016-06-13 20:46:22 -0400100 * @mon_state: cookie with information needed by link monitor
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500101 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
102 * @exp_msg_count: # of tunnelled messages expected during link changeover
103 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
104 * @mtu: current maximum packet size for this link
105 * @advertised_mtu: advertised own mtu when link is being established
106 * @transmitq: queue for sent, non-acked messages
107 * @backlogq: queue for messages waiting to be sent
108 * @snt_nxt: next sequence number to use for outbound messages
LUU Duc Canh31c4f4c2018-11-10 14:23:50 -0500109 * @prev_from: sequence number of most previous retransmission request
Jon Maloya4dc70d2018-07-06 15:22:36 +0200110 * @stale_limit: time when repeated identical retransmits must force link reset
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500111 * @ackers: # of peers that needs to ack each packet before it can be released
112 * @acked: # last packet acked by a certain peer. Used for broadcast.
113 * @rcv_nxt: next sequence number to expect for inbound messages
114 * @deferred_queue: deferred queue saved OOS b'cast message received from node
115 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
116 * @inputq: buffer queue for messages to be delivered upwards
117 * @namedq: buffer queue for name table messages to be delivered upwards
118 * @next_out: ptr to first unsent outbound message in queue
119 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
120 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
121 * @reasm_buf: head of partially reassembled inbound message fragments
122 * @bc_rcvr: marks that this is a broadcast receiver link
123 * @stats: collects statistics regarding link activity
124 */
125struct tipc_link {
126 u32 addr;
127 char name[TIPC_MAX_LINK_NAME];
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500128 struct net *net;
129
130 /* Management and link supervision data */
Jon Maloy7ea817f2018-07-10 01:07:36 +0200131 u16 peer_session;
132 u16 session;
Jon Maloy9012de52018-07-10 01:07:35 +0200133 u16 snd_nxt_state;
134 u16 rcv_nxt_state;
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500135 u32 peer_bearer_id;
136 u32 bearer_id;
137 u32 tolerance;
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500138 u32 abort_limit;
139 u32 state;
140 u16 peer_caps;
Jon Maloy7ea817f2018-07-10 01:07:36 +0200141 bool in_session;
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500142 bool active;
143 u32 silent_intv_cnt;
Jon Paul Maloye74a3862016-03-03 14:23:21 -0500144 char if_name[TIPC_MAX_IF_NAME];
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500145 u32 priority;
146 char net_plane;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -0400147 struct tipc_mon_state mon_state;
Jon Paul Maloy88e8ac72016-04-15 13:33:04 -0400148 u16 rst_cnt;
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500149
150 /* Failover/synch */
151 u16 drop_point;
152 struct sk_buff *failover_reasm_skb;
Tuong Lien58ee86b2019-04-04 11:09:53 +0700153 struct sk_buff_head failover_deferdq;
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500154
155 /* Max packet negotiation */
156 u16 mtu;
157 u16 advertised_mtu;
158
159 /* Sending */
160 struct sk_buff_head transmq;
161 struct sk_buff_head backlogq;
162 struct {
163 u16 len;
164 u16 limit;
165 } backlog[5];
166 u16 snd_nxt;
LUU Duc Canh31c4f4c2018-11-10 14:23:50 -0500167 u16 prev_from;
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500168 u16 window;
Jon Maloya4dc70d2018-07-06 15:22:36 +0200169 unsigned long stale_limit;
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500170
171 /* Reception */
172 u16 rcv_nxt;
173 u32 rcv_unacked;
174 struct sk_buff_head deferdq;
175 struct sk_buff_head *inputq;
176 struct sk_buff_head *namedq;
177
178 /* Congestion handling */
179 struct sk_buff_head wakeupq;
180
181 /* Fragmentation/reassembly */
182 struct sk_buff *reasm_buf;
Tuong Lien2320bcd2019-07-24 08:56:12 +0700183 struct sk_buff *reasm_tnlmsg;
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500184
185 /* Broadcast */
186 u16 ackers;
187 u16 acked;
188 struct tipc_link *bc_rcvlink;
189 struct tipc_link *bc_sndlink;
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -0400190 u8 nack_state;
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500191 bool bc_peer_is_up;
192
193 /* Statistics */
194 struct tipc_stats stats;
195};
196
Erik Hugne2cf8aa12012-06-29 00:16:37 -0400197/*
198 * Error message prefixes
199 */
Jon Paul Maloy6e498152015-07-30 18:24:19 -0400200static const char *link_co_err = "Link tunneling error, ";
Erik Hugne2cf8aa12012-06-29 00:16:37 -0400201static const char *link_rst_msg = "Resetting link ";
Richard Alpe7be57fc2014-11-20 10:29:12 +0100202
Jon Paul Maloy52666982015-10-22 08:51:41 -0400203/* Send states for broadcast NACKs
204 */
205enum {
206 BC_NACK_SND_CONDITIONAL,
207 BC_NACK_SND_UNCONDITIONAL,
208 BC_NACK_SND_SUPPRESS,
209};
210
Jon Maloy53962bc2019-06-28 17:06:20 +0200211#define TIPC_BC_RETR_LIM (jiffies + msecs_to_jiffies(10))
Tuong Lien382f5982019-04-04 11:09:52 +0700212#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -0400213
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +0900214/*
Jon Paul Maloyd9992972015-07-16 16:54:31 -0400215 * Interval between NACKs when packets arrive out of order
216 */
217#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
Jon Paul Maloye74a3862016-03-03 14:23:21 -0500218
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400219/* Link FSM states:
Jon Paul Maloyd3504c32015-07-16 16:54:25 -0400220 */
221enum {
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400222 LINK_ESTABLISHED = 0xe,
223 LINK_ESTABLISHING = 0xe << 4,
224 LINK_RESET = 0x1 << 8,
225 LINK_RESETTING = 0x2 << 12,
226 LINK_PEER_RESET = 0xd << 16,
227 LINK_FAILINGOVER = 0xf << 20,
228 LINK_SYNCHING = 0xc << 24
Jon Paul Maloyd3504c32015-07-16 16:54:25 -0400229};
230
231/* Link FSM state checking routines
232 */
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400233static int link_is_up(struct tipc_link *l)
Jon Paul Maloyd3504c32015-07-16 16:54:25 -0400234{
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400235 return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
Jon Paul Maloyd3504c32015-07-16 16:54:25 -0400236}
237
Jon Paul Maloyd9992972015-07-16 16:54:31 -0400238static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
239 struct sk_buff_head *xmitq);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -0400240static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
Jon Maloy8d6e79d2017-11-08 09:59:26 +0100241 bool probe_reply, u16 rcvgap,
242 int tolerance, int priority,
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -0400243 struct sk_buff_head *xmitq);
Jon Paul Maloy1a906322015-11-19 14:30:47 -0500244static void link_print(struct tipc_link *l, const char *str);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -0400245static int tipc_link_build_nack_msg(struct tipc_link *l,
246 struct sk_buff_head *xmitq);
Jon Paul Maloy52666982015-10-22 08:51:41 -0400247static void tipc_link_build_bc_init_msg(struct tipc_link *l,
248 struct sk_buff_head *xmitq);
249static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
Tuong Lien91959482019-04-04 11:09:51 +0700250static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data);
Tuong Lien6a6b5c82019-06-17 12:15:42 +0700251static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
252 struct tipc_gap_ack_blks *ga,
253 struct sk_buff_head *xmitq);
Jon Paul Maloy8b4ed862015-03-25 12:07:26 -0400254
Per Lidenb97bf3f2006-01-02 19:04:38 +0100255/*
Sam Ravnborg05790c62006-03-20 22:37:04 -0800256 * Simple non-static link routines (i.e. referenced outside this file)
Per Lidenb97bf3f2006-01-02 19:04:38 +0100257 */
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400258bool tipc_link_is_up(struct tipc_link *l)
Per Lidenb97bf3f2006-01-02 19:04:38 +0100259{
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400260 return link_is_up(l);
261}
262
Jon Paul Maloyc8199302015-10-15 14:52:46 -0400263bool tipc_link_peer_is_down(struct tipc_link *l)
264{
265 return l->state == LINK_PEER_RESET;
266}
267
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400268bool tipc_link_is_reset(struct tipc_link *l)
269{
270 return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
271}
272
Jon Paul Maloy73f646c2015-10-15 14:52:44 -0400273bool tipc_link_is_establishing(struct tipc_link *l)
274{
275 return l->state == LINK_ESTABLISHING;
276}
277
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400278bool tipc_link_is_synching(struct tipc_link *l)
279{
280 return l->state == LINK_SYNCHING;
281}
282
283bool tipc_link_is_failingover(struct tipc_link *l)
284{
285 return l->state == LINK_FAILINGOVER;
286}
287
288bool tipc_link_is_blocked(struct tipc_link *l)
289{
290 return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100291}
292
Wu Fengguang742e0382015-10-24 22:56:01 +0800293static bool link_is_bc_sndlink(struct tipc_link *l)
Jon Paul Maloy52666982015-10-22 08:51:41 -0400294{
295 return !l->bc_sndlink;
296}
297
Wu Fengguang742e0382015-10-24 22:56:01 +0800298static bool link_is_bc_rcvlink(struct tipc_link *l)
Jon Paul Maloy52666982015-10-22 08:51:41 -0400299{
300 return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
301}
302
Jon Paul Maloyc72fa872015-10-22 08:51:46 -0400303void tipc_link_set_active(struct tipc_link *l, bool active)
304{
305 l->active = active;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100306}
307
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500308u32 tipc_link_id(struct tipc_link *l)
309{
310 return l->peer_bearer_id << 16 | l->bearer_id;
311}
312
313int tipc_link_window(struct tipc_link *l)
314{
315 return l->window;
316}
317
318int tipc_link_prio(struct tipc_link *l)
319{
320 return l->priority;
321}
322
323unsigned long tipc_link_tolerance(struct tipc_link *l)
324{
325 return l->tolerance;
326}
327
328struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
329{
330 return l->inputq;
331}
332
333char tipc_link_plane(struct tipc_link *l)
334{
335 return l->net_plane;
336}
337
Jon Maloy9012de52018-07-10 01:07:35 +0200338void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
339{
340 l->peer_caps = capabilities;
341}
342
Jon Paul Maloy52666982015-10-22 08:51:41 -0400343void tipc_link_add_bc_peer(struct tipc_link *snd_l,
344 struct tipc_link *uc_l,
345 struct sk_buff_head *xmitq)
Jon Paul Maloy2f566122015-10-22 08:51:39 -0400346{
Jon Paul Maloy52666982015-10-22 08:51:41 -0400347 struct tipc_link *rcv_l = uc_l->bc_rcvlink;
348
349 snd_l->ackers++;
350 rcv_l->acked = snd_l->snd_nxt - 1;
Jon Paul Maloy9a650832015-11-19 14:12:50 -0500351 snd_l->state = LINK_ESTABLISHED;
Jon Paul Maloy52666982015-10-22 08:51:41 -0400352 tipc_link_build_bc_init_msg(uc_l, xmitq);
Jon Paul Maloy2f566122015-10-22 08:51:39 -0400353}
354
Jon Paul Maloy52666982015-10-22 08:51:41 -0400355void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
356 struct tipc_link *rcv_l,
357 struct sk_buff_head *xmitq)
Jon Paul Maloy2f566122015-10-22 08:51:39 -0400358{
Jon Paul Maloy52666982015-10-22 08:51:41 -0400359 u16 ack = snd_l->snd_nxt - 1;
360
361 snd_l->ackers--;
Jon Paul Maloya71eb722016-07-11 16:08:36 -0400362 rcv_l->bc_peer_is_up = true;
363 rcv_l->state = LINK_ESTABLISHED;
Jon Paul Maloy52666982015-10-22 08:51:41 -0400364 tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
Tuong Lien26574db2018-12-19 09:17:57 +0700365 trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
Jon Paul Maloy52666982015-10-22 08:51:41 -0400366 tipc_link_reset(rcv_l);
367 rcv_l->state = LINK_RESET;
368 if (!snd_l->ackers) {
Tuong Lien26574db2018-12-19 09:17:57 +0700369 trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
Jon Paul Maloy52666982015-10-22 08:51:41 -0400370 tipc_link_reset(snd_l);
Jon Paul Maloy9a650832015-11-19 14:12:50 -0500371 snd_l->state = LINK_RESET;
Jon Paul Maloy52666982015-10-22 08:51:41 -0400372 __skb_queue_purge(xmitq);
373 }
Jon Paul Maloy2f566122015-10-22 08:51:39 -0400374}
375
376int tipc_link_bc_peers(struct tipc_link *l)
377{
378 return l->ackers;
379}
380
YueHaibinge064cce2018-07-19 17:16:59 +0800381static u16 link_bc_rcv_gap(struct tipc_link *l)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -0400382{
383 struct sk_buff *skb = skb_peek(&l->deferdq);
384 u16 gap = 0;
385
386 if (more(l->snd_nxt, l->rcv_nxt))
387 gap = l->snd_nxt - l->rcv_nxt;
388 if (skb)
389 gap = buf_seqno(skb) - l->rcv_nxt;
390 return gap;
391}
392
Jon Paul Maloy959e1782015-10-22 08:51:43 -0400393void tipc_link_set_mtu(struct tipc_link *l, int mtu)
394{
395 l->mtu = mtu;
396}
397
398int tipc_link_mtu(struct tipc_link *l)
399{
400 return l->mtu;
401}
402
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500403u16 tipc_link_rcv_nxt(struct tipc_link *l)
404{
405 return l->rcv_nxt;
406}
407
408u16 tipc_link_acked(struct tipc_link *l)
409{
410 return l->acked;
411}
412
413char *tipc_link_name(struct tipc_link *l)
414{
415 return l->name;
416}
417
LUU Duc Canhc140eb12018-09-26 21:00:54 +0200418u32 tipc_link_state(struct tipc_link *l)
419{
420 return l->state;
421}
422
Per Lidenb97bf3f2006-01-02 19:04:38 +0100423/**
Per Liden4323add2006-01-18 00:38:21 +0100424 * tipc_link_create - create a new link
Jon Paul Maloy440d8962015-07-30 18:24:26 -0400425 * @n: pointer to associated node
Jon Paul Maloy0e054982015-10-22 08:51:36 -0400426 * @if_name: associated interface name
427 * @bearer_id: id (index) of associated bearer
428 * @tolerance: link tolerance to be used by link
429 * @net_plane: network plane (A,B,c..) this link belongs to
430 * @mtu: mtu to be advertised by link
431 * @priority: priority to be used by link
432 * @window: send window to be used by link
433 * @session: session to be used by link
Jon Paul Maloy440d8962015-07-30 18:24:26 -0400434 * @ownnode: identity of own node
Jon Paul Maloy0e054982015-10-22 08:51:36 -0400435 * @peer: node id of peer node
Jon Paul Maloyfd556f22015-10-22 08:51:40 -0400436 * @peer_caps: bitmap describing peer node capabilities
Jon Paul Maloy52666982015-10-22 08:51:41 -0400437 * @bc_sndlink: the namespace global link used for broadcast sending
438 * @bc_rcvlink: the peer specific link used for broadcast reception
Jon Paul Maloy440d8962015-07-30 18:24:26 -0400439 * @inputq: queue to put messages ready for delivery
440 * @namedq: queue to put binding table update messages ready for delivery
441 * @link: return value, pointer to put the created link
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +0900442 *
Jon Paul Maloy440d8962015-07-30 18:24:26 -0400443 * Returns true if link was created, otherwise false
Per Lidenb97bf3f2006-01-02 19:04:38 +0100444 */
Jon Paul Maloyc72fa872015-10-22 08:51:46 -0400445bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
Jon Paul Maloy0e054982015-10-22 08:51:36 -0400446 int tolerance, char net_plane, u32 mtu, int priority,
Jon Maloy25b0b9c2018-03-22 20:42:51 +0100447 int window, u32 session, u32 self,
448 u32 peer, u8 *peer_id, u16 peer_caps,
Jon Paul Maloy52666982015-10-22 08:51:41 -0400449 struct tipc_link *bc_sndlink,
450 struct tipc_link *bc_rcvlink,
451 struct sk_buff_head *inputq,
452 struct sk_buff_head *namedq,
Jon Paul Maloy440d8962015-07-30 18:24:26 -0400453 struct tipc_link **link)
Per Lidenb97bf3f2006-01-02 19:04:38 +0100454{
Jon Maloy25b0b9c2018-03-22 20:42:51 +0100455 char peer_str[NODE_ID_STR_LEN] = {0,};
456 char self_str[NODE_ID_STR_LEN] = {0,};
Jon Paul Maloy440d8962015-07-30 18:24:26 -0400457 struct tipc_link *l;
Allan Stephens37b9c082011-02-28 11:32:27 -0500458
Jon Paul Maloy440d8962015-07-30 18:24:26 -0400459 l = kzalloc(sizeof(*l), GFP_ATOMIC);
460 if (!l)
461 return false;
462 *link = l;
Jon Paul Maloye74a3862016-03-03 14:23:21 -0500463 l->session = session;
Jon Paul Maloy0e054982015-10-22 08:51:36 -0400464
Jon Maloy25b0b9c2018-03-22 20:42:51 +0100465 /* Set link name for unicast links only */
466 if (peer_id) {
467 tipc_nodeid2string(self_str, tipc_own_id(net));
468 if (strlen(self_str) > 16)
469 sprintf(self_str, "%x", self);
470 tipc_nodeid2string(peer_str, peer_id);
471 if (strlen(peer_str) > 16)
472 sprintf(peer_str, "%x", peer);
473 }
474 /* Peer i/f name will be completed by reset/activate message */
Jon Maloy7494cfa2018-03-29 23:20:45 +0200475 snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
476 self_str, if_name, peer_str);
Jon Maloy25b0b9c2018-03-22 20:42:51 +0100477
Jon Paul Maloye74a3862016-03-03 14:23:21 -0500478 strcpy(l->if_name, if_name);
Jon Paul Maloy0e054982015-10-22 08:51:36 -0400479 l->addr = peer;
Jon Paul Maloyfd556f22015-10-22 08:51:40 -0400480 l->peer_caps = peer_caps;
Jon Paul Maloyc72fa872015-10-22 08:51:46 -0400481 l->net = net;
Jon Maloy7ea817f2018-07-10 01:07:36 +0200482 l->in_session = false;
Jon Paul Maloy0e054982015-10-22 08:51:36 -0400483 l->bearer_id = bearer_id;
484 l->tolerance = tolerance;
Jon Maloy047491e2018-10-10 17:34:01 +0200485 if (bc_rcvlink)
486 bc_rcvlink->tolerance = tolerance;
Jon Paul Maloy0e054982015-10-22 08:51:36 -0400487 l->net_plane = net_plane;
488 l->advertised_mtu = mtu;
489 l->mtu = mtu;
490 l->priority = priority;
491 tipc_link_set_queue_limits(l, window);
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -0400492 l->ackers = 1;
Jon Paul Maloy52666982015-10-22 08:51:41 -0400493 l->bc_sndlink = bc_sndlink;
494 l->bc_rcvlink = bc_rcvlink;
Jon Paul Maloy0e054982015-10-22 08:51:36 -0400495 l->inputq = inputq;
496 l->namedq = namedq;
497 l->state = LINK_RESETTING;
Jon Paul Maloy440d8962015-07-30 18:24:26 -0400498 __skb_queue_head_init(&l->transmq);
499 __skb_queue_head_init(&l->backlogq);
500 __skb_queue_head_init(&l->deferdq);
Tuong Lien58ee86b2019-04-04 11:09:53 +0700501 __skb_queue_head_init(&l->failover_deferdq);
Jon Paul Maloy440d8962015-07-30 18:24:26 -0400502 skb_queue_head_init(&l->wakeupq);
503 skb_queue_head_init(l->inputq);
504 return true;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100505}
506
Jon Paul Maloy32301902015-10-22 08:51:37 -0400507/**
508 * tipc_link_bc_create - create new link to be used for broadcast
509 * @n: pointer to associated node
Jon Maloy4c94cc22017-11-30 16:47:25 +0100510 * @mtu: mtu to be used initially if no peers
Jon Paul Maloy32301902015-10-22 08:51:37 -0400511 * @window: send window to be used
512 * @inputq: queue to put messages ready for delivery
513 * @namedq: queue to put binding table update messages ready for delivery
514 * @link: return value, pointer to put the created link
515 *
516 * Returns true if link was created, otherwise false
517 */
Jon Paul Maloyc72fa872015-10-22 08:51:46 -0400518bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
Jon Paul Maloy52666982015-10-22 08:51:41 -0400519 int mtu, int window, u16 peer_caps,
Jon Paul Maloy32301902015-10-22 08:51:37 -0400520 struct sk_buff_head *inputq,
521 struct sk_buff_head *namedq,
Jon Paul Maloy52666982015-10-22 08:51:41 -0400522 struct tipc_link *bc_sndlink,
Jon Paul Maloy32301902015-10-22 08:51:37 -0400523 struct tipc_link **link)
524{
525 struct tipc_link *l;
526
Jon Paul Maloyc72fa872015-10-22 08:51:46 -0400527 if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
Jon Maloy25b0b9c2018-03-22 20:42:51 +0100528 0, ownnode, peer, NULL, peer_caps, bc_sndlink,
Jon Paul Maloy52666982015-10-22 08:51:41 -0400529 NULL, inputq, namedq, link))
Jon Paul Maloy32301902015-10-22 08:51:37 -0400530 return false;
531
532 l = *link;
533 strcpy(l->name, tipc_bclink_name);
Tuong Lien26574db2018-12-19 09:17:57 +0700534 trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
Jon Paul Maloy32301902015-10-22 08:51:37 -0400535 tipc_link_reset(l);
Jon Paul Maloy52666982015-10-22 08:51:41 -0400536 l->state = LINK_RESET;
Jon Paul Maloy2f566122015-10-22 08:51:39 -0400537 l->ackers = 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -0400538 l->bc_rcvlink = l;
539
540 /* Broadcast send link is always up */
541 if (link_is_bc_sndlink(l))
542 l->state = LINK_ESTABLISHED;
543
Jon Paul Maloy01fd12b2017-01-18 13:50:53 -0500544 /* Disable replicast if even a single peer doesn't support it */
545 if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
546 tipc_bcast_disable_rcast(net);
547
Jon Paul Maloy32301902015-10-22 08:51:37 -0400548 return true;
549}
550
Per Lidenb97bf3f2006-01-02 19:04:38 +0100551/**
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400552 * tipc_link_fsm_evt - link finite state machine
553 * @l: pointer to link
554 * @evt: state machine event to be processed
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400555 */
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400556int tipc_link_fsm_evt(struct tipc_link *l, int evt)
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400557{
Jon Paul Maloy5045f7b2015-07-30 18:24:20 -0400558 int rc = 0;
Tuong Lien26574db2018-12-19 09:17:57 +0700559 int old_state = l->state;
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400560
561 switch (l->state) {
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400562 case LINK_RESETTING:
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400563 switch (evt) {
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400564 case LINK_PEER_RESET_EVT:
565 l->state = LINK_PEER_RESET;
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400566 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400567 case LINK_RESET_EVT:
568 l->state = LINK_RESET;
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400569 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400570 case LINK_FAILURE_EVT:
571 case LINK_FAILOVER_BEGIN_EVT:
572 case LINK_ESTABLISH_EVT:
573 case LINK_FAILOVER_END_EVT:
574 case LINK_SYNCH_BEGIN_EVT:
575 case LINK_SYNCH_END_EVT:
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400576 default:
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400577 goto illegal_evt;
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400578 }
579 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400580 case LINK_RESET:
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400581 switch (evt) {
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400582 case LINK_PEER_RESET_EVT:
583 l->state = LINK_ESTABLISHING;
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400584 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400585 case LINK_FAILOVER_BEGIN_EVT:
586 l->state = LINK_FAILINGOVER;
587 case LINK_FAILURE_EVT:
588 case LINK_RESET_EVT:
589 case LINK_ESTABLISH_EVT:
590 case LINK_FAILOVER_END_EVT:
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400591 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400592 case LINK_SYNCH_BEGIN_EVT:
593 case LINK_SYNCH_END_EVT:
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400594 default:
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400595 goto illegal_evt;
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400596 }
597 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400598 case LINK_PEER_RESET:
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400599 switch (evt) {
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400600 case LINK_RESET_EVT:
601 l->state = LINK_ESTABLISHING;
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400602 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400603 case LINK_PEER_RESET_EVT:
604 case LINK_ESTABLISH_EVT:
605 case LINK_FAILURE_EVT:
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400606 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400607 case LINK_SYNCH_BEGIN_EVT:
608 case LINK_SYNCH_END_EVT:
609 case LINK_FAILOVER_BEGIN_EVT:
610 case LINK_FAILOVER_END_EVT:
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400611 default:
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400612 goto illegal_evt;
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400613 }
614 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400615 case LINK_FAILINGOVER:
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400616 switch (evt) {
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400617 case LINK_FAILOVER_END_EVT:
618 l->state = LINK_RESET;
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400619 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400620 case LINK_PEER_RESET_EVT:
621 case LINK_RESET_EVT:
622 case LINK_ESTABLISH_EVT:
623 case LINK_FAILURE_EVT:
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400624 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400625 case LINK_FAILOVER_BEGIN_EVT:
626 case LINK_SYNCH_BEGIN_EVT:
627 case LINK_SYNCH_END_EVT:
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400628 default:
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400629 goto illegal_evt;
630 }
631 break;
632 case LINK_ESTABLISHING:
633 switch (evt) {
634 case LINK_ESTABLISH_EVT:
635 l->state = LINK_ESTABLISHED;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400636 break;
637 case LINK_FAILOVER_BEGIN_EVT:
638 l->state = LINK_FAILINGOVER;
639 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400640 case LINK_RESET_EVT:
Jon Paul Maloy73f646c2015-10-15 14:52:44 -0400641 l->state = LINK_RESET;
642 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400643 case LINK_FAILURE_EVT:
Jon Paul Maloy73f646c2015-10-15 14:52:44 -0400644 case LINK_PEER_RESET_EVT:
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400645 case LINK_SYNCH_BEGIN_EVT:
646 case LINK_FAILOVER_END_EVT:
647 break;
648 case LINK_SYNCH_END_EVT:
649 default:
650 goto illegal_evt;
651 }
652 break;
653 case LINK_ESTABLISHED:
654 switch (evt) {
655 case LINK_PEER_RESET_EVT:
656 l->state = LINK_PEER_RESET;
657 rc |= TIPC_LINK_DOWN_EVT;
658 break;
659 case LINK_FAILURE_EVT:
660 l->state = LINK_RESETTING;
661 rc |= TIPC_LINK_DOWN_EVT;
662 break;
663 case LINK_RESET_EVT:
664 l->state = LINK_RESET;
665 break;
666 case LINK_ESTABLISH_EVT:
Jon Paul Maloy5ae2f8e2015-08-20 02:12:55 -0400667 case LINK_SYNCH_END_EVT:
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400668 break;
669 case LINK_SYNCH_BEGIN_EVT:
670 l->state = LINK_SYNCHING;
671 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400672 case LINK_FAILOVER_BEGIN_EVT:
673 case LINK_FAILOVER_END_EVT:
674 default:
675 goto illegal_evt;
676 }
677 break;
678 case LINK_SYNCHING:
679 switch (evt) {
680 case LINK_PEER_RESET_EVT:
681 l->state = LINK_PEER_RESET;
682 rc |= TIPC_LINK_DOWN_EVT;
683 break;
684 case LINK_FAILURE_EVT:
685 l->state = LINK_RESETTING;
686 rc |= TIPC_LINK_DOWN_EVT;
687 break;
688 case LINK_RESET_EVT:
689 l->state = LINK_RESET;
690 break;
691 case LINK_ESTABLISH_EVT:
692 case LINK_SYNCH_BEGIN_EVT:
693 break;
694 case LINK_SYNCH_END_EVT:
695 l->state = LINK_ESTABLISHED;
696 break;
697 case LINK_FAILOVER_BEGIN_EVT:
698 case LINK_FAILOVER_END_EVT:
699 default:
700 goto illegal_evt;
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400701 }
702 break;
703 default:
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400704 pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400705 }
Tuong Lien26574db2018-12-19 09:17:57 +0700706 trace_tipc_link_fsm(l->name, old_state, l->state, evt);
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400707 return rc;
708illegal_evt:
709 pr_err("Illegal FSM event %x in state %x on link %s\n",
710 evt, l->state, l->name);
Tuong Lien26574db2018-12-19 09:17:57 +0700711 trace_tipc_link_fsm(l->name, old_state, l->state, evt);
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400712 return rc;
713}
714
Jon Paul Maloy333ef692015-07-16 16:54:28 -0400715/* link_profile_stats - update statistical profiling of traffic
716 */
717static void link_profile_stats(struct tipc_link *l)
718{
719 struct sk_buff *skb;
720 struct tipc_msg *msg;
721 int length;
722
723 /* Update counters used in statistical profiling of send traffic */
724 l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
725 l->stats.queue_sz_counts++;
726
727 skb = skb_peek(&l->transmq);
728 if (!skb)
729 return;
730 msg = buf_msg(skb);
731 length = msg_size(msg);
732
733 if (msg_user(msg) == MSG_FRAGMENTER) {
734 if (msg_type(msg) != FIRST_FRAGMENT)
735 return;
Jon Maloya7dc51a2019-06-25 19:37:00 +0200736 length = msg_size(msg_inner_hdr(msg));
Jon Paul Maloy333ef692015-07-16 16:54:28 -0400737 }
738 l->stats.msg_lengths_total += length;
739 l->stats.msg_length_counts++;
740 if (length <= 64)
741 l->stats.msg_length_profile[0]++;
742 else if (length <= 256)
743 l->stats.msg_length_profile[1]++;
744 else if (length <= 1024)
745 l->stats.msg_length_profile[2]++;
746 else if (length <= 4096)
747 l->stats.msg_length_profile[3]++;
748 else if (length <= 16384)
749 l->stats.msg_length_profile[4]++;
750 else if (length <= 32768)
751 l->stats.msg_length_profile[5]++;
752 else
753 l->stats.msg_length_profile[6]++;
754}
755
Tuong Lien26574db2018-12-19 09:17:57 +0700756/**
757 * tipc_link_too_silent - check if link is "too silent"
758 * @l: tipc link to be checked
759 *
760 * Returns true if the link 'silent_intv_cnt' is about to reach the
761 * 'abort_limit' value, otherwise false
762 */
763bool tipc_link_too_silent(struct tipc_link *l)
764{
765 return (l->silent_intv_cnt + 2 > l->abort_limit);
766}
767
Jon Paul Maloy333ef692015-07-16 16:54:28 -0400768/* tipc_link_timeout - perform periodic task as instructed from node timeout
769 */
770int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
771{
Ying Xuec91522f2016-06-15 14:11:31 +0800772 int mtyp = 0;
773 int rc = 0;
Jon Paul Maloy42b18f62016-04-15 13:33:05 -0400774 bool state = false;
775 bool probe = false;
776 bool setup = false;
Jon Paul Maloy52666982015-10-22 08:51:41 -0400777 u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
778 u16 bc_acked = l->bc_rcvlink->acked;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -0400779 struct tipc_mon_state *mstate = &l->mon_state;
Jon Paul Maloy5045f7b2015-07-30 18:24:20 -0400780
Tuong Lien26574db2018-12-19 09:17:57 +0700781 trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
782 trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400783 switch (l->state) {
784 case LINK_ESTABLISHED:
785 case LINK_SYNCHING:
Jon Paul Maloy42b18f62016-04-15 13:33:05 -0400786 mtyp = STATE_MSG;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -0400787 link_profile_stats(l);
788 tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
789 if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
790 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
Jon Paul Maloy42b18f62016-04-15 13:33:05 -0400791 state = bc_acked != bc_snt;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -0400792 state |= l->bc_rcvlink->rcv_unacked;
793 state |= l->rcv_unacked;
794 state |= !skb_queue_empty(&l->transmq);
795 state |= !skb_queue_empty(&l->deferdq);
796 probe = mstate->probing;
797 probe |= l->silent_intv_cnt;
798 if (probe || mstate->monitoring)
799 l->silent_intv_cnt++;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400800 break;
801 case LINK_RESET:
Jon Paul Maloy42b18f62016-04-15 13:33:05 -0400802 setup = l->rst_cnt++ <= 4;
803 setup |= !(l->rst_cnt % 16);
Jon Paul Maloy5045f7b2015-07-30 18:24:20 -0400804 mtyp = RESET_MSG;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400805 break;
806 case LINK_ESTABLISHING:
Jon Paul Maloy42b18f62016-04-15 13:33:05 -0400807 setup = true;
Jon Paul Maloy5045f7b2015-07-30 18:24:20 -0400808 mtyp = ACTIVATE_MSG;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400809 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400810 case LINK_PEER_RESET:
Jon Paul Maloy598411d2015-07-30 18:24:23 -0400811 case LINK_RESETTING:
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400812 case LINK_FAILINGOVER:
813 break;
814 default:
815 break;
Jon Paul Maloy5045f7b2015-07-30 18:24:20 -0400816 }
Jon Paul Maloy662921c2015-07-30 18:24:21 -0400817
Jon Paul Maloy42b18f62016-04-15 13:33:05 -0400818 if (state || probe || setup)
Jon Maloy8d6e79d2017-11-08 09:59:26 +0100819 tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);
Jon Paul Maloy5045f7b2015-07-30 18:24:20 -0400820
Jon Paul Maloy333ef692015-07-16 16:54:28 -0400821 return rc;
822}
823
Jon Paul Maloy6ab30f92015-07-16 16:54:27 -0400824/**
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400825 * link_schedule_user - schedule a message sender for wakeup after congestion
Jon Paul Maloy365ad352017-01-03 10:55:11 -0500826 * @l: congested link
827 * @hdr: header of message that is being sent
Jon Paul Maloy50100a52014-08-22 18:09:07 -0400828 * Create pseudo msg to send back to user when congestion abates
Per Lidenb97bf3f2006-01-02 19:04:38 +0100829 */
Jon Paul Maloy365ad352017-01-03 10:55:11 -0500830static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
Per Lidenb97bf3f2006-01-02 19:04:38 +0100831{
Jon Paul Maloy365ad352017-01-03 10:55:11 -0500832 u32 dnode = tipc_own_addr(l->net);
833 u32 dport = msg_origport(hdr);
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400834 struct sk_buff *skb;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100835
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400836 /* Create and schedule wakeup pseudo message */
837 skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
Jon Paul Maloy365ad352017-01-03 10:55:11 -0500838 dnode, l->addr, dport, 0, 0);
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400839 if (!skb)
Jon Paul Maloy22d85c72015-07-16 16:54:23 -0400840 return -ENOBUFS;
Jon Paul Maloy365ad352017-01-03 10:55:11 -0500841 msg_set_dest_droppable(buf_msg(skb), true);
842 TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
843 skb_queue_tail(&l->wakeupq, skb);
844 l->stats.link_congs++;
Tuong Lien26574db2018-12-19 09:17:57 +0700845 trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
Jon Paul Maloy3127a022015-03-25 12:07:25 -0400846 return -ELINKCONG;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100847}
848
Jon Paul Maloy50100a52014-08-22 18:09:07 -0400849/**
850 * link_prepare_wakeup - prepare users for wakeup after congestion
Jon Paul Maloy365ad352017-01-03 10:55:11 -0500851 * @l: congested link
852 * Wake up a number of waiting users, as permitted by available space
853 * in the send queue
Jon Paul Maloy50100a52014-08-22 18:09:07 -0400854 */
YueHaibinge064cce2018-07-19 17:16:59 +0800855static void link_prepare_wakeup(struct tipc_link *l)
Per Lidenb97bf3f2006-01-02 19:04:38 +0100856{
Jon Maloy7c5b4202019-07-30 16:23:18 +0200857 struct sk_buff_head *wakeupq = &l->wakeupq;
858 struct sk_buff_head *inputq = l->inputq;
Ying Xue58d78b32014-11-26 11:41:51 +0800859 struct sk_buff *skb, *tmp;
Jon Maloy7c5b4202019-07-30 16:23:18 +0200860 struct sk_buff_head tmpq;
861 int avail[5] = {0,};
862 int imp = 0;
Per Lidenb97bf3f2006-01-02 19:04:38 +0100863
Jon Maloy7c5b4202019-07-30 16:23:18 +0200864 __skb_queue_head_init(&tmpq);
865
866 for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
867 avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;
868
869 skb_queue_walk_safe(wakeupq, skb, tmp) {
Jon Paul Maloy1f66d162015-03-25 12:07:24 -0400870 imp = TIPC_SKB_CB(skb)->chain_imp;
Jon Maloy7c5b4202019-07-30 16:23:18 +0200871 if (avail[imp] <= 0)
872 continue;
873 avail[imp]--;
874 __skb_unlink(skb, wakeupq);
875 __skb_queue_tail(&tmpq, skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100876 }
Jon Maloy7c5b4202019-07-30 16:23:18 +0200877
878 spin_lock_bh(&inputq->lock);
879 skb_queue_splice_tail(&tmpq, inputq);
880 spin_unlock_bh(&inputq->lock);
881
Per Lidenb97bf3f2006-01-02 19:04:38 +0100882}
883
Jon Paul Maloy6e498152015-07-30 18:24:19 -0400884void tipc_link_reset(struct tipc_link *l)
Per Lidenb97bf3f2006-01-02 19:04:38 +0100885{
Ying Xuea1f8dd32018-10-11 19:57:56 +0800886 struct sk_buff_head list;
887
888 __skb_queue_head_init(&list);
889
Jon Maloy7ea817f2018-07-10 01:07:36 +0200890 l->in_session = false;
Tuong Lienf7a93782019-04-16 10:48:07 +0700891 /* Force re-synch of peer session number before establishing */
892 l->peer_session--;
Jon Paul Maloye74a3862016-03-03 14:23:21 -0500893 l->session++;
Jon Paul Maloy6e498152015-07-30 18:24:19 -0400894 l->mtu = l->advertised_mtu;
Ying Xuea1f8dd32018-10-11 19:57:56 +0800895
Parthasarathy Bhuvaragan3f32d0b2018-09-25 22:09:10 +0200896 spin_lock_bh(&l->wakeupq.lock);
Ying Xuea1f8dd32018-10-11 19:57:56 +0800897 skb_queue_splice_init(&l->wakeupq, &list);
Parthasarathy Bhuvaragan3f32d0b2018-09-25 22:09:10 +0200898 spin_unlock_bh(&l->wakeupq.lock);
899
Ying Xuea1f8dd32018-10-11 19:57:56 +0800900 spin_lock_bh(&l->inputq->lock);
901 skb_queue_splice_init(&list, l->inputq);
902 spin_unlock_bh(&l->inputq->lock);
903
Jon Paul Maloy6e498152015-07-30 18:24:19 -0400904 __skb_queue_purge(&l->transmq);
905 __skb_queue_purge(&l->deferdq);
Jon Paul Maloy2af5ae32015-10-22 08:51:48 -0400906 __skb_queue_purge(&l->backlogq);
Tuong Lien58ee86b2019-04-04 11:09:53 +0700907 __skb_queue_purge(&l->failover_deferdq);
Jon Paul Maloy2af5ae32015-10-22 08:51:48 -0400908 l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
909 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
910 l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
911 l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
912 l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
Jon Paul Maloy6e498152015-07-30 18:24:19 -0400913 kfree_skb(l->reasm_buf);
Tuong Lien2320bcd2019-07-24 08:56:12 +0700914 kfree_skb(l->reasm_tnlmsg);
Jon Paul Maloy6e498152015-07-30 18:24:19 -0400915 kfree_skb(l->failover_reasm_skb);
916 l->reasm_buf = NULL;
Tuong Lien2320bcd2019-07-24 08:56:12 +0700917 l->reasm_tnlmsg = NULL;
Jon Paul Maloy6e498152015-07-30 18:24:19 -0400918 l->failover_reasm_skb = NULL;
919 l->rcv_unacked = 0;
920 l->snd_nxt = 1;
921 l->rcv_nxt = 1;
Jon Maloy9012de52018-07-10 01:07:35 +0200922 l->snd_nxt_state = 1;
923 l->rcv_nxt_state = 1;
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -0400924 l->acked = 0;
Jon Paul Maloy6e498152015-07-30 18:24:19 -0400925 l->silent_intv_cnt = 0;
Jon Paul Maloy88e8ac72016-04-15 13:33:04 -0400926 l->rst_cnt = 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -0400927 l->bc_peer_is_up = false;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -0400928 memset(&l->mon_state, 0, sizeof(l->mon_state));
Jon Paul Maloy38206d52015-11-19 14:30:46 -0500929 tipc_link_reset_stats(l);
Per Lidenb97bf3f2006-01-02 19:04:38 +0100930}
931
Per Lidenb97bf3f2006-01-02 19:04:38 +0100932/**
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400933 * tipc_link_xmit(): enqueue buffer list according to queue situation
934 * @link: link to use
935 * @list: chain of buffers containing message
936 * @xmitq: returned list of packets to be sent by caller
937 *
Jon Paul Maloy365ad352017-01-03 10:55:11 -0500938 * Consumes the buffer chain.
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400939 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
940 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
941 */
942int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
943 struct sk_buff_head *xmitq)
944{
945 struct tipc_msg *hdr = buf_msg(skb_peek(list));
946 unsigned int maxwin = l->window;
Jon Paul Maloy365ad352017-01-03 10:55:11 -0500947 int imp = msg_importance(hdr);
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400948 unsigned int mtu = l->mtu;
949 u16 ack = l->rcv_nxt - 1;
950 u16 seqno = l->snd_nxt;
Jon Paul Maloy52666982015-10-22 08:51:41 -0400951 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400952 struct sk_buff_head *transmq = &l->transmq;
953 struct sk_buff_head *backlogq = &l->backlogq;
954 struct sk_buff *skb, *_skb, *bskb;
Jon Paul Maloy95901122016-11-25 10:35:02 -0500955 int pkt_cnt = skb_queue_len(list);
Jon Paul Maloy365ad352017-01-03 10:55:11 -0500956 int rc = 0;
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400957
Richard Alpe4952cd32016-02-11 10:43:15 +0100958 if (unlikely(msg_size(hdr) > mtu)) {
Tuong Lien2320bcd2019-07-24 08:56:12 +0700959 pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
960 skb_queue_len(list), msg_user(hdr),
961 msg_type(hdr), msg_size(hdr), mtu);
Jon Maloye654f9f2019-08-15 16:42:50 +0200962 __skb_queue_purge(list);
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400963 return -EMSGSIZE;
Richard Alpe4952cd32016-02-11 10:43:15 +0100964 }
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400965
Jon Paul Maloy365ad352017-01-03 10:55:11 -0500966 /* Allow oversubscription of one data msg per source at congestion */
967 if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
968 if (imp == TIPC_SYSTEM_IMPORTANCE) {
969 pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
970 return -ENOBUFS;
971 }
972 rc = link_schedule_user(l, hdr);
973 }
974
Jon Paul Maloy95901122016-11-25 10:35:02 -0500975 if (pkt_cnt > 1) {
976 l->stats.sent_fragmented++;
977 l->stats.sent_fragments += pkt_cnt;
978 }
979
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400980 /* Prepare each packet for sending, and add to relevant queue: */
981 while (skb_queue_len(list)) {
982 skb = skb_peek(list);
983 hdr = buf_msg(skb);
984 msg_set_seqno(hdr, seqno);
985 msg_set_ack(hdr, ack);
Jon Paul Maloy52666982015-10-22 08:51:41 -0400986 msg_set_bcast_ack(hdr, bc_ack);
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400987
988 if (likely(skb_queue_len(transmq) < maxwin)) {
989 _skb = skb_clone(skb, GFP_ATOMIC);
Richard Alpe4952cd32016-02-11 10:43:15 +0100990 if (!_skb) {
Jon Maloye654f9f2019-08-15 16:42:50 +0200991 __skb_queue_purge(list);
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400992 return -ENOBUFS;
Richard Alpe4952cd32016-02-11 10:43:15 +0100993 }
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400994 __skb_dequeue(list);
995 __skb_queue_tail(transmq, skb);
Hoang Le05572272018-12-19 11:42:19 +0700996 /* next retransmit attempt */
997 if (link_is_bc_sndlink(l))
Jon Maloy53962bc2019-06-28 17:06:20 +0200998 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -0400999 __skb_queue_tail(xmitq, _skb);
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001000 TIPC_SKB_CB(skb)->ackers = l->ackers;
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -04001001 l->rcv_unacked = 0;
Jon Paul Maloy95901122016-11-25 10:35:02 -05001002 l->stats.sent_pkts++;
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -04001003 seqno++;
1004 continue;
1005 }
1006 if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
1007 kfree_skb(__skb_dequeue(list));
1008 l->stats.sent_bundled++;
1009 continue;
1010 }
1011 if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
1012 kfree_skb(__skb_dequeue(list));
1013 __skb_queue_tail(backlogq, bskb);
1014 l->backlog[msg_importance(buf_msg(bskb))].len++;
1015 l->stats.sent_bundled++;
1016 l->stats.sent_bundles++;
1017 continue;
1018 }
1019 l->backlog[imp].len += skb_queue_len(list);
1020 skb_queue_splice_tail_init(list, backlogq);
1021 }
1022 l->snd_nxt = seqno;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001023 return rc;
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -04001024}
1025
YueHaibinge064cce2018-07-19 17:16:59 +08001026static void tipc_link_advance_backlog(struct tipc_link *l,
1027 struct sk_buff_head *xmitq)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001028{
1029 struct sk_buff *skb, *_skb;
1030 struct tipc_msg *hdr;
1031 u16 seqno = l->snd_nxt;
1032 u16 ack = l->rcv_nxt - 1;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001033 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001034
1035 while (skb_queue_len(&l->transmq) < l->window) {
1036 skb = skb_peek(&l->backlogq);
1037 if (!skb)
1038 break;
1039 _skb = skb_clone(skb, GFP_ATOMIC);
1040 if (!_skb)
1041 break;
1042 __skb_dequeue(&l->backlogq);
1043 hdr = buf_msg(skb);
1044 l->backlog[msg_importance(hdr)].len--;
1045 __skb_queue_tail(&l->transmq, skb);
Hoang Le05572272018-12-19 11:42:19 +07001046 /* next retransmit attempt */
1047 if (link_is_bc_sndlink(l))
Jon Maloy53962bc2019-06-28 17:06:20 +02001048 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
Hoang Le05572272018-12-19 11:42:19 +07001049
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001050 __skb_queue_tail(xmitq, _skb);
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001051 TIPC_SKB_CB(skb)->ackers = l->ackers;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001052 msg_set_seqno(hdr, seqno);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001053 msg_set_ack(hdr, ack);
1054 msg_set_bcast_ack(hdr, bc_ack);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001055 l->rcv_unacked = 0;
Jon Paul Maloy95901122016-11-25 10:35:02 -05001056 l->stats.sent_pkts++;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001057 seqno++;
1058 }
1059 l->snd_nxt = seqno;
1060}
1061
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001062/**
1063 * link_retransmit_failure() - Detect repeated retransmit failures
1064 * @l: tipc link sender
1065 * @r: tipc link receiver (= l in case of unicast)
1066 * @from: seqno of the 1st packet in retransmit request
1067 * @rc: returned code
1068 *
1069 * Return: true if the repeated retransmit failures happens, otherwise
1070 * false
1071 */
1072static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
1073 u16 from, int *rc)
Allan Stephensd356eeb2006-06-25 23:40:01 -07001074{
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001075 struct sk_buff *skb = skb_peek(&l->transmq);
1076 struct tipc_msg *hdr;
Allan Stephensd356eeb2006-06-25 23:40:01 -07001077
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001078 if (!skb)
1079 return false;
1080 hdr = buf_msg(skb);
1081
1082 /* Detect repeated retransmit failures on same packet */
1083 if (r->prev_from != from) {
1084 r->prev_from = from;
1085 r->stale_limit = jiffies + msecs_to_jiffies(r->tolerance);
Jon Maloy77cf8ed2019-06-25 17:36:43 +02001086 } else if (time_after(jiffies, r->stale_limit)) {
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001087 pr_warn("Retransmission failure on link <%s>\n", l->name);
1088 link_print(l, "State of link ");
1089 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
1090 msg_user(hdr), msg_type(hdr), msg_size(hdr),
1091 msg_errcode(hdr));
1092 pr_info("sqno %u, prev: %x, src: %x\n",
1093 msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
1094
1095 trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
1096 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
1097 trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");
1098
1099 if (link_is_bc_sndlink(l))
1100 *rc = TIPC_LINK_DOWN_EVT;
1101
1102 *rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1103 return true;
1104 }
1105
1106 return false;
Allan Stephensd356eeb2006-06-25 23:40:01 -07001107}
1108
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001109/* tipc_link_bc_retrans() - retransmit zero or more packets
Jon Maloya4dc70d2018-07-06 15:22:36 +02001110 * @l: the link to transmit on
1111 * @r: the receiving link ordering the retransmit. Same as l if unicast
1112 * @from: retransmit from (inclusive) this sequence number
1113 * @to: retransmit to (inclusive) this sequence number
1114 * xmitq: queue for accumulating the retransmitted packets
1115 */
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001116static int tipc_link_bc_retrans(struct tipc_link *l, struct tipc_link *r,
1117 u16 from, u16 to, struct sk_buff_head *xmitq)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001118{
1119 struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001120 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
Jon Maloya4dc70d2018-07-06 15:22:36 +02001121 u16 ack = l->rcv_nxt - 1;
1122 struct tipc_msg *hdr;
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001123 int rc = 0;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001124
1125 if (!skb)
1126 return 0;
LUU Duc Canh31c4f4c2018-11-10 14:23:50 -05001127 if (less(to, from))
1128 return 0;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001129
Tuong Lien26574db2018-12-19 09:17:57 +07001130 trace_tipc_link_retrans(r, from, to, &l->transmq);
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001131
1132 if (link_retransmit_failure(l, r, from, &rc))
1133 return rc;
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001134
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001135 skb_queue_walk(&l->transmq, skb) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001136 hdr = buf_msg(skb);
Jon Maloya4dc70d2018-07-06 15:22:36 +02001137 if (less(msg_seqno(hdr), from))
1138 continue;
1139 if (more(msg_seqno(hdr), to))
1140 break;
LUU Duc Canh31c4f4c2018-11-10 14:23:50 -05001141 if (link_is_bc_sndlink(l)) {
1142 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1143 continue;
Jon Maloy53962bc2019-06-28 17:06:20 +02001144 TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
LUU Duc Canh31c4f4c2018-11-10 14:23:50 -05001145 }
Jon Maloy20c67312019-06-25 18:08:13 +02001146 _skb = __pskb_copy(skb, LL_MAX_HEADER + MIN_H_SIZE, GFP_ATOMIC);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001147 if (!_skb)
1148 return 0;
1149 hdr = buf_msg(_skb);
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001150 msg_set_ack(hdr, ack);
1151 msg_set_bcast_ack(hdr, bc_ack);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001152 _skb->priority = TC_PRIO_CONTROL;
1153 __skb_queue_tail(xmitq, _skb);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001154 l->stats.retransmitted++;
1155 }
1156 return 0;
1157}
1158
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001159/* tipc_data_input - deliver data and name distr msgs to upper layer
Erik Hugne7ae934b2014-07-01 10:22:40 +02001160 *
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001161 * Consumes buffer if message is of right type
Erik Hugne7ae934b2014-07-01 10:22:40 +02001162 * Node lock must be held
1163 */
Jon Paul Maloy52666982015-10-22 08:51:41 -04001164static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001165 struct sk_buff_head *inputq)
Erik Hugne7ae934b2014-07-01 10:22:40 +02001166{
Jon Maloy399574d2017-10-13 11:04:32 +02001167 struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
Jon Paul Maloya853e4c2017-01-18 13:50:52 -05001168 struct tipc_msg *hdr = buf_msg(skb);
1169
1170 switch (msg_user(hdr)) {
Erik Hugne7ae934b2014-07-01 10:22:40 +02001171 case TIPC_LOW_IMPORTANCE:
1172 case TIPC_MEDIUM_IMPORTANCE:
1173 case TIPC_HIGH_IMPORTANCE:
1174 case TIPC_CRITICAL_IMPORTANCE:
Jon Maloy2f487712017-10-13 11:04:31 +02001175 if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
Jon Maloy399574d2017-10-13 11:04:32 +02001176 skb_queue_tail(mc_inputq, skb);
Jon Paul Maloya853e4c2017-01-18 13:50:52 -05001177 return true;
1178 }
Gustavo A. R. Silvaf79e3362019-01-23 01:09:31 -06001179 /* fall through */
Jon Maloy2f487712017-10-13 11:04:31 +02001180 case CONN_MANAGER:
Jon Maloy36c0a9d2017-10-16 16:04:51 +02001181 skb_queue_tail(inputq, skb);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001182 return true;
Jon Maloy399574d2017-10-13 11:04:32 +02001183 case GROUP_PROTOCOL:
1184 skb_queue_tail(mc_inputq, skb);
1185 return true;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001186 case NAME_DISTRIBUTOR:
Jon Paul Maloy52666982015-10-22 08:51:41 -04001187 l->bc_rcvlink->state = LINK_ESTABLISHED;
1188 skb_queue_tail(l->namedq, skb);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001189 return true;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001190 case MSG_BUNDLER:
Jon Paul Maloydff29b12015-04-02 09:33:01 -04001191 case TUNNEL_PROTOCOL:
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001192 case MSG_FRAGMENTER:
1193 case BCAST_PROTOCOL:
1194 return false;
1195 default:
1196 pr_warn("Dropping received illegal msg type\n");
1197 kfree_skb(skb);
Hoang Le7384b532019-02-11 09:18:28 +07001198 return true;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001199 };
1200}
1201
1202/* tipc_link_input - process packet that has passed link protocol check
1203 *
1204 * Consumes buffer
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001205 */
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001206static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
Tuong Lien58ee86b2019-04-04 11:09:53 +07001207 struct sk_buff_head *inputq,
1208 struct sk_buff **reasm_skb)
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001209{
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001210 struct tipc_msg *hdr = buf_msg(skb);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001211 struct sk_buff *iskb;
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001212 struct sk_buff_head tmpq;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001213 int usr = msg_user(hdr);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001214 int pos = 0;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001215
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001216 if (usr == MSG_BUNDLER) {
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001217 skb_queue_head_init(&tmpq);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001218 l->stats.recv_bundles++;
1219 l->stats.recv_bundled += msg_msgcnt(hdr);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001220 while (tipc_msg_extract(skb, &iskb, &pos))
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001221 tipc_data_input(l, iskb, &tmpq);
1222 tipc_skb_queue_splice_tail(&tmpq, inputq);
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001223 return 0;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001224 } else if (usr == MSG_FRAGMENTER) {
1225 l->stats.recv_fragments++;
1226 if (tipc_buf_append(reasm_skb, &skb)) {
1227 l->stats.recv_fragmented++;
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001228 tipc_data_input(l, skb, inputq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001229 } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1230 pr_warn_ratelimited("Unable to build fragment list\n");
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001231 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001232 }
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001233 return 0;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001234 } else if (usr == BCAST_PROTOCOL) {
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04001235 tipc_bcast_lock(l->net);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001236 tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04001237 tipc_bcast_unlock(l->net);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001238 }
Tuong Lien58ee86b2019-04-04 11:09:53 +07001239
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001240 kfree_skb(skb);
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001241 return 0;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001242}
1243
Tuong Lien58ee86b2019-04-04 11:09:53 +07001244/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1245 * inner message along with the ones in the old link's
1246 * deferdq
1247 * @l: tunnel link
1248 * @skb: TUNNEL_PROTOCOL message
1249 * @inputq: queue to put messages ready for delivery
1250 */
1251static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1252 struct sk_buff_head *inputq)
1253{
1254 struct sk_buff **reasm_skb = &l->failover_reasm_skb;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001255 struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001256 struct sk_buff_head *fdefq = &l->failover_deferdq;
1257 struct tipc_msg *hdr = buf_msg(skb);
1258 struct sk_buff *iskb;
1259 int ipos = 0;
1260 int rc = 0;
1261 u16 seqno;
1262
Tuong Lien2320bcd2019-07-24 08:56:12 +07001263 if (msg_type(hdr) == SYNCH_MSG) {
1264 kfree_skb(skb);
1265 return 0;
1266 }
Tuong Lien58ee86b2019-04-04 11:09:53 +07001267
Tuong Lien2320bcd2019-07-24 08:56:12 +07001268 /* Not a fragment? */
1269 if (likely(!msg_nof_fragms(hdr))) {
1270 if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
1271 pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
1272 skb_queue_len(fdefq));
1273 return 0;
1274 }
1275 kfree_skb(skb);
1276 } else {
1277 /* Set fragment type for buf_append */
1278 if (msg_fragm_no(hdr) == 1)
1279 msg_set_type(hdr, FIRST_FRAGMENT);
1280 else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
1281 msg_set_type(hdr, FRAGMENT);
1282 else
1283 msg_set_type(hdr, LAST_FRAGMENT);
1284
1285 if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
1286 /* Successful but non-complete reassembly? */
1287 if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
1288 return 0;
1289 pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
1290 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1291 }
1292 iskb = skb;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001293 }
1294
1295 do {
1296 seqno = buf_seqno(iskb);
Tuong Lien58ee86b2019-04-04 11:09:53 +07001297 if (unlikely(less(seqno, l->drop_point))) {
1298 kfree_skb(iskb);
1299 continue;
1300 }
Tuong Lien58ee86b2019-04-04 11:09:53 +07001301 if (unlikely(seqno != l->drop_point)) {
1302 __tipc_skb_queue_sorted(fdefq, seqno, iskb);
1303 continue;
1304 }
1305
1306 l->drop_point++;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001307 if (!tipc_data_input(l, iskb, inputq))
1308 rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1309 if (unlikely(rc))
1310 break;
1311 } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1312
Tuong Lien58ee86b2019-04-04 11:09:53 +07001313 return rc;
1314}
1315
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001316static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1317{
1318 bool released = false;
1319 struct sk_buff *skb, *tmp;
1320
1321 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1322 if (more(buf_seqno(skb), acked))
1323 break;
1324 __skb_unlink(skb, &l->transmq);
1325 kfree_skb(skb);
1326 released = true;
1327 }
1328 return released;
1329}
1330
Tuong Lien91959482019-04-04 11:09:51 +07001331/* tipc_build_gap_ack_blks - build Gap ACK blocks
1332 * @l: tipc link that data have come with gaps in sequence if any
1333 * @data: data buffer to store the Gap ACK blocks after built
1334 *
1335 * returns the actual allocated memory size
1336 */
1337static u16 tipc_build_gap_ack_blks(struct tipc_link *l, void *data)
1338{
1339 struct sk_buff *skb = skb_peek(&l->deferdq);
1340 struct tipc_gap_ack_blks *ga = data;
1341 u16 len, expect, seqno = 0;
1342 u8 n = 0;
1343
1344 if (!skb)
1345 goto exit;
1346
1347 expect = buf_seqno(skb);
1348 skb_queue_walk(&l->deferdq, skb) {
1349 seqno = buf_seqno(skb);
1350 if (unlikely(more(seqno, expect))) {
1351 ga->gacks[n].ack = htons(expect - 1);
1352 ga->gacks[n].gap = htons(seqno - expect);
1353 if (++n >= MAX_GAP_ACK_BLKS) {
1354 pr_info_ratelimited("Too few Gap ACK blocks!\n");
1355 goto exit;
1356 }
1357 } else if (unlikely(less(seqno, expect))) {
1358 pr_warn("Unexpected skb in deferdq!\n");
1359 continue;
1360 }
1361 expect = seqno + 1;
1362 }
1363
1364 /* last block */
1365 ga->gacks[n].ack = htons(seqno);
1366 ga->gacks[n].gap = 0;
1367 n++;
1368
1369exit:
1370 len = tipc_gap_ack_blks_sz(n);
1371 ga->len = htons(len);
1372 ga->gack_cnt = n;
1373 return len;
1374}
1375
1376/* tipc_link_advance_transmq - advance TIPC link transmq queue by releasing
1377 * acked packets, also doing retransmissions if
1378 * gaps found
1379 * @l: tipc link with transmq queue to be advanced
1380 * @acked: seqno of last packet acked by peer without any gaps before
1381 * @gap: # of gap packets
1382 * @ga: buffer pointer to Gap ACK blocks from peer
1383 * @xmitq: queue for accumulating the retransmitted packets if any
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001384 *
1385 * In case of a repeated retransmit failures, the call will return shortly
1386 * with a returned code (e.g. TIPC_LINK_DOWN_EVT)
Tuong Lien91959482019-04-04 11:09:51 +07001387 */
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001388static int tipc_link_advance_transmq(struct tipc_link *l, u16 acked, u16 gap,
1389 struct tipc_gap_ack_blks *ga,
1390 struct sk_buff_head *xmitq)
Tuong Lien91959482019-04-04 11:09:51 +07001391{
1392 struct sk_buff *skb, *_skb, *tmp;
1393 struct tipc_msg *hdr;
1394 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1395 u16 ack = l->rcv_nxt - 1;
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001396 u16 seqno, n = 0;
1397 int rc = 0;
1398
1399 if (gap && link_retransmit_failure(l, l, acked + 1, &rc))
1400 return rc;
Tuong Lien91959482019-04-04 11:09:51 +07001401
1402 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1403 seqno = buf_seqno(skb);
1404
1405next_gap_ack:
1406 if (less_eq(seqno, acked)) {
1407 /* release skb */
1408 __skb_unlink(skb, &l->transmq);
1409 kfree_skb(skb);
1410 } else if (less_eq(seqno, acked + gap)) {
1411 /* retransmit skb */
Tuong Lien382f5982019-04-04 11:09:52 +07001412 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1413 continue;
1414 TIPC_SKB_CB(skb)->nxt_retr = TIPC_UC_RETR_TIME;
1415
Tuong Lien91959482019-04-04 11:09:51 +07001416 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1417 if (!_skb)
1418 continue;
1419 hdr = buf_msg(_skb);
1420 msg_set_ack(hdr, ack);
1421 msg_set_bcast_ack(hdr, bc_ack);
1422 _skb->priority = TC_PRIO_CONTROL;
1423 __skb_queue_tail(xmitq, _skb);
1424 l->stats.retransmitted++;
1425 } else {
1426 /* retry with Gap ACK blocks if any */
1427 if (!ga || n >= ga->gack_cnt)
1428 break;
1429 acked = ntohs(ga->gacks[n].ack);
1430 gap = ntohs(ga->gacks[n].gap);
1431 n++;
1432 goto next_gap_ack;
1433 }
1434 }
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001435
1436 return 0;
Tuong Lien91959482019-04-04 11:09:51 +07001437}
1438
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001439/* tipc_link_build_state_msg: prepare link state message for transmission
Jon Paul Maloy52666982015-10-22 08:51:41 -04001440 *
1441 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1442 * risk of ack storms towards the sender
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001443 */
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001444int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001445{
Jon Paul Maloy52666982015-10-22 08:51:41 -04001446 if (!l)
1447 return 0;
1448
1449 /* Broadcast ACK must be sent via a unicast link => defer to caller */
1450 if (link_is_bc_rcvlink(l)) {
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001451 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
Jon Paul Maloy52666982015-10-22 08:51:41 -04001452 return 0;
1453 l->rcv_unacked = 0;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001454
1455 /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1456 l->snd_nxt = l->rcv_nxt;
1457 return TIPC_LINK_SND_STATE;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001458 }
1459
1460 /* Unicast ACK */
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001461 l->rcv_unacked = 0;
1462 l->stats.sent_acks++;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001463 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001464 return 0;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001465}
1466
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001467/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1468 */
1469void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1470{
1471 int mtyp = RESET_MSG;
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001472 struct sk_buff *skb;
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001473
1474 if (l->state == LINK_ESTABLISHING)
1475 mtyp = ACTIVATE_MSG;
1476
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001477 tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001478
1479 /* Inform peer that this endpoint is going down if applicable */
1480 skb = skb_peek_tail(xmitq);
1481 if (skb && (l->state == LINK_RESET))
1482 msg_set_peer_stopping(buf_msg(skb), 1);
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001483}
1484
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001485/* tipc_link_build_nack_msg: prepare link nack message for transmission
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001486 * Note that sending of broadcast NACK is coordinated among nodes, to
1487 * reduce the risk of NACK storms towards the sender
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001488 */
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001489static int tipc_link_build_nack_msg(struct tipc_link *l,
1490 struct sk_buff_head *xmitq)
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001491{
1492 u32 def_cnt = ++l->stats.deferred_recv;
Tuong Lien382f5982019-04-04 11:09:52 +07001493 u32 defq_len = skb_queue_len(&l->deferdq);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001494 int match1, match2;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001495
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001496 if (link_is_bc_rcvlink(l)) {
1497 match1 = def_cnt & 0xf;
1498 match2 = tipc_own_addr(l->net) & 0xf;
1499 if (match1 == match2)
1500 return TIPC_LINK_SND_STATE;
1501 return 0;
1502 }
Jon Paul Maloy52666982015-10-22 08:51:41 -04001503
Tuong Lien382f5982019-04-04 11:09:52 +07001504 if (defq_len >= 3 && !((defq_len - 3) % 16))
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001505 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001506 return 0;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001507}
1508
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001509/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001510 * @l: the link that should handle the message
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001511 * @skb: TIPC packet
1512 * @xmitq: queue to place packets to be sent after this call
1513 */
1514int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1515 struct sk_buff_head *xmitq)
1516{
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001517 struct sk_buff_head *defq = &l->deferdq;
Tuong Lien382f5982019-04-04 11:09:52 +07001518 struct tipc_msg *hdr = buf_msg(skb);
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001519 u16 seqno, rcv_nxt, win_lim;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001520 int rc = 0;
1521
Tuong Lien382f5982019-04-04 11:09:52 +07001522 /* Verify and update link state */
1523 if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1524 return tipc_link_proto_rcv(l, skb, xmitq);
1525
1526 /* Don't send probe at next timeout expiration */
1527 l->silent_intv_cnt = 0;
1528
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001529 do {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001530 hdr = buf_msg(skb);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001531 seqno = msg_seqno(hdr);
1532 rcv_nxt = l->rcv_nxt;
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001533 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001534
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001535 if (unlikely(!link_is_up(l))) {
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001536 if (l->state == LINK_ESTABLISHING)
1537 rc = TIPC_LINK_UP_EVT;
1538 goto drop;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001539 }
1540
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001541 /* Drop if outside receive window */
1542 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1543 l->stats.duplicates++;
1544 goto drop;
1545 }
1546
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001547 /* Forward queues and wake up waiting users */
1548 if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1549 tipc_link_advance_backlog(l, xmitq);
1550 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1551 link_prepare_wakeup(l);
1552 }
1553
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001554 /* Defer delivery if sequence gap */
1555 if (unlikely(seqno != rcv_nxt)) {
Jon Paul Maloy8306f992015-10-15 14:52:43 -04001556 __tipc_skb_queue_sorted(defq, seqno, skb);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001557 rc |= tipc_link_build_nack_msg(l, xmitq);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001558 break;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001559 }
1560
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001561 /* Deliver packet */
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001562 l->rcv_nxt++;
Jon Paul Maloy95901122016-11-25 10:35:02 -05001563 l->stats.recv_pkts++;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001564
1565 if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1566 rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1567 else if (!tipc_data_input(l, skb, l->inputq))
1568 rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001569 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001570 rc |= tipc_link_build_state_msg(l, xmitq);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001571 if (unlikely(rc & ~TIPC_LINK_SND_STATE))
Jon Paul Maloy52666982015-10-22 08:51:41 -04001572 break;
Tuong Lien382f5982019-04-04 11:09:52 +07001573 } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001574
1575 return rc;
1576drop:
1577 kfree_skb(skb);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001578 return rc;
1579}
1580
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001581static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001582 bool probe_reply, u16 rcvgap,
1583 int tolerance, int priority,
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001584 struct sk_buff_head *xmitq)
1585{
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001586 struct tipc_link *bcl = l->bc_rcvlink;
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001587 struct sk_buff *skb;
1588 struct tipc_msg *hdr;
1589 struct sk_buff_head *dfq = &l->deferdq;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001590 bool node_up = link_is_up(bcl);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001591 struct tipc_mon_state *mstate = &l->mon_state;
1592 int dlen = 0;
1593 void *data;
Tuong Lien91959482019-04-04 11:09:51 +07001594 u16 glen = 0;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001595
1596 /* Don't send protocol message during reset or link failover */
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001597 if (tipc_link_is_blocked(l))
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001598 return;
1599
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001600 if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1601 return;
1602
1603 if (!skb_queue_empty(dfq))
1604 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1605
1606 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
Tuong Lien91959482019-04-04 11:09:51 +07001607 tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1608 l->addr, tipc_own_addr(l->net), 0, 0, 0);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001609 if (!skb)
1610 return;
1611
1612 hdr = buf_msg(skb);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001613 data = msg_data(hdr);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001614 msg_set_session(hdr, l->session);
1615 msg_set_bearer_id(hdr, l->bearer_id);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001616 msg_set_net_plane(hdr, l->net_plane);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001617 msg_set_next_sent(hdr, l->snd_nxt);
1618 msg_set_ack(hdr, l->rcv_nxt - 1);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001619 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
Jon Paul Maloy06bd2b12016-10-27 18:51:55 -04001620 msg_set_bc_ack_invalid(hdr, !node_up);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001621 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001622 msg_set_link_tolerance(hdr, tolerance);
1623 msg_set_linkprio(hdr, priority);
1624 msg_set_redundant_link(hdr, node_up);
1625 msg_set_seq_gap(hdr, 0);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001626 msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001627
1628 if (mtyp == STATE_MSG) {
Jon Maloy9012de52018-07-10 01:07:35 +02001629 if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1630 msg_set_seqno(hdr, l->snd_nxt_state++);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001631 msg_set_seq_gap(hdr, rcvgap);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001632 msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001633 msg_set_probe(hdr, probe);
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001634 msg_set_is_keepalive(hdr, probe || probe_reply);
Tuong Lien91959482019-04-04 11:09:51 +07001635 if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
1636 glen = tipc_build_gap_ack_blks(l, data);
1637 tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1638 msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1639 skb_trim(skb, INT_H_SIZE + glen + dlen);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001640 l->stats.sent_states++;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001641 l->rcv_unacked = 0;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001642 } else {
1643 /* RESET_MSG or ACTIVATE_MSG */
Tuong Lien91986ee2019-02-11 13:29:43 +07001644 if (mtyp == ACTIVATE_MSG) {
1645 msg_set_dest_session_valid(hdr, 1);
1646 msg_set_dest_session(hdr, l->peer_session);
1647 }
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001648 msg_set_max_pkt(hdr, l->advertised_mtu);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001649 strcpy(data, l->if_name);
1650 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1651 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001652 }
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001653 if (probe)
1654 l->stats.sent_probes++;
1655 if (rcvgap)
1656 l->stats.sent_nacks++;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001657 skb->priority = TC_PRIO_CONTROL;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001658 __skb_queue_tail(xmitq, skb);
Tuong Lien26574db2018-12-19 09:17:57 +07001659 trace_tipc_proto_build(skb, false, l->name);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001660}
Per Lidenb97bf3f2006-01-02 19:04:38 +01001661
LUU Duc Canhc140eb12018-09-26 21:00:54 +02001662void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1663 struct sk_buff_head *xmitq)
1664{
1665 u32 onode = tipc_own_addr(l->net);
1666 struct tipc_msg *hdr, *ihdr;
1667 struct sk_buff_head tnlq;
1668 struct sk_buff *skb;
1669 u32 dnode = l->addr;
1670
Jon Maloye654f9f2019-08-15 16:42:50 +02001671 __skb_queue_head_init(&tnlq);
LUU Duc Canhc140eb12018-09-26 21:00:54 +02001672 skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1673 INT_H_SIZE, BASIC_H_SIZE,
1674 dnode, onode, 0, 0, 0);
1675 if (!skb) {
1676 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1677 return;
1678 }
1679
1680 hdr = buf_msg(skb);
1681 msg_set_msgcnt(hdr, 1);
1682 msg_set_bearer_id(hdr, l->peer_bearer_id);
1683
1684 ihdr = (struct tipc_msg *)msg_data(hdr);
1685 tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1686 BASIC_H_SIZE, dnode);
1687 msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1688 __skb_queue_tail(&tnlq, skb);
1689 tipc_link_xmit(l, &tnlq, xmitq);
1690}
1691
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001692/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001693 * with contents of the link's transmit and backlog queues.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001694 */
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001695void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1696 int mtyp, struct sk_buff_head *xmitq)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001697{
Tuong Lien58ee86b2019-04-04 11:09:53 +07001698 struct sk_buff_head *fdefq = &tnl->failover_deferdq;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001699 struct sk_buff *skb, *tnlskb;
1700 struct tipc_msg *hdr, tnlhdr;
1701 struct sk_buff_head *queue = &l->transmq;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001702 struct sk_buff_head tmpxq, tnlq, frags;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001703 u16 pktlen, pktcnt, seqno = l->snd_nxt;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001704 bool pktcnt_need_update = false;
Tuong Lien4929a932019-07-24 08:56:11 +07001705 u16 syncpt;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001706 int rc;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001707
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001708 if (!tnl)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001709 return;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001710
Jon Maloye654f9f2019-08-15 16:42:50 +02001711 __skb_queue_head_init(&tnlq);
1712 __skb_queue_head_init(&tmpxq);
1713 __skb_queue_head_init(&frags);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001714
1715 /* At least one packet required for safe algorithm => add dummy */
1716 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001717 BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001718 0, 0, TIPC_ERR_NO_PORT);
Ying Xuea6ca1092014-11-26 11:41:55 +08001719 if (!skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001720 pr_warn("%sunable to create tunnel packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001721 return;
Allan Stephens5392d642006-06-25 23:52:50 -07001722 }
Jon Maloye654f9f2019-08-15 16:42:50 +02001723 __skb_queue_tail(&tnlq, skb);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001724 tipc_link_xmit(l, &tnlq, &tmpxq);
1725 __skb_queue_purge(&tmpxq);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001726
Tuong Lien4929a932019-07-24 08:56:11 +07001727 /* Link Synching:
1728 * From now on, send only one single ("dummy") SYNCH message
1729 * to peer. The SYNCH message does not contain any data, just
1730 * a header conveying the synch point to the peer.
1731 */
1732 if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1733 tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
1734 INT_H_SIZE, 0, l->addr,
1735 tipc_own_addr(l->net),
1736 0, 0, 0);
1737 if (!tnlskb) {
1738 pr_warn("%sunable to create dummy SYNCH_MSG\n",
1739 link_co_err);
1740 return;
1741 }
1742
1743 hdr = buf_msg(tnlskb);
1744 syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
1745 msg_set_syncpt(hdr, syncpt);
1746 msg_set_bearer_id(hdr, l->peer_bearer_id);
1747 __skb_queue_tail(&tnlq, tnlskb);
1748 tipc_link_xmit(tnl, &tnlq, xmitq);
1749 return;
1750 }
1751
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001752 /* Initialize reusable tunnel packet header */
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001753 tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001754 mtyp, INT_H_SIZE, l->addr);
Tuong Lien58ee86b2019-04-04 11:09:53 +07001755 if (mtyp == SYNCH_MSG)
1756 pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
1757 else
1758 pktcnt = skb_queue_len(&l->transmq);
1759 pktcnt += skb_queue_len(&l->backlogq);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001760 msg_set_msgcnt(&tnlhdr, pktcnt);
1761 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1762tnl:
1763 /* Wrap each packet into a tunnel packet */
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001764 skb_queue_walk(queue, skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001765 hdr = buf_msg(skb);
1766 if (queue == &l->backlogq)
1767 msg_set_seqno(hdr, seqno++);
1768 pktlen = msg_size(hdr);
Tuong Lien2320bcd2019-07-24 08:56:12 +07001769
1770 /* Tunnel link MTU is not large enough? This could be
1771 * due to:
1772 * 1) Link MTU has just changed or set differently;
1773 * 2) Or FAILOVER on the top of a SYNCH message
1774 *
1775 * The 2nd case should not happen if peer supports
1776 * TIPC_TUNNEL_ENHANCED
1777 */
1778 if (pktlen > tnl->mtu - INT_H_SIZE) {
1779 if (mtyp == FAILOVER_MSG &&
1780 (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1781 rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
1782 &frags);
1783 if (rc) {
1784 pr_warn("%sunable to frag msg: rc %d\n",
1785 link_co_err, rc);
1786 return;
1787 }
1788 pktcnt += skb_queue_len(&frags) - 1;
1789 pktcnt_need_update = true;
1790 skb_queue_splice_tail_init(&frags, &tnlq);
1791 continue;
1792 }
1793 /* Unluckily, peer doesn't have TIPC_TUNNEL_ENHANCED
1794 * => Just warn it and return!
1795 */
1796 pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
1797 link_co_err, msg_user(hdr),
1798 msg_type(hdr), msg_size(hdr));
1799 return;
1800 }
1801
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001802 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
Parthasarathy Bhuvaragan57d5f642017-01-13 15:46:25 +01001803 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001804 if (!tnlskb) {
1805 pr_warn("%sunable to send packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001806 return;
1807 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001808 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1809 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1810 __skb_queue_tail(&tnlq, tnlskb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001811 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001812 if (queue != &l->backlogq) {
1813 queue = &l->backlogq;
1814 goto tnl;
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -04001815 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001816
Tuong Lien2320bcd2019-07-24 08:56:12 +07001817 if (pktcnt_need_update)
1818 skb_queue_walk(&tnlq, skb) {
1819 hdr = buf_msg(skb);
1820 msg_set_msgcnt(hdr, pktcnt);
1821 }
1822
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001823 tipc_link_xmit(tnl, &tnlq, xmitq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001824
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001825 if (mtyp == FAILOVER_MSG) {
1826 tnl->drop_point = l->rcv_nxt;
1827 tnl->failover_reasm_skb = l->reasm_buf;
1828 l->reasm_buf = NULL;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001829
1830 /* Failover the link's deferdq */
1831 if (unlikely(!skb_queue_empty(fdefq))) {
1832 pr_warn("Link failover deferdq not empty: %d!\n",
1833 skb_queue_len(fdefq));
1834 __skb_queue_purge(fdefq);
1835 }
1836 skb_queue_splice_init(&l->deferdq, fdefq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05001837 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001838}
1839
Tuong Lienc0b14a082019-05-02 17:23:23 +07001840/**
1841 * tipc_link_failover_prepare() - prepare tnl for link failover
1842 *
1843 * This is a special version of the precursor - tipc_link_tnl_prepare(),
1844 * see the tipc_node_link_failover() for details
1845 *
1846 * @l: failover link
1847 * @tnl: tunnel link
1848 * @xmitq: queue for messages to be xmited
1849 */
1850void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
1851 struct sk_buff_head *xmitq)
1852{
1853 struct sk_buff_head *fdefq = &tnl->failover_deferdq;
1854
1855 tipc_link_create_dummy_tnl_msg(tnl, xmitq);
1856
1857 /* This failover link enpoint was never established before,
1858 * so it has not received anything from peer.
1859 * Otherwise, it must be a normal failover situation or the
1860 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
1861 * would have to start over from scratch instead.
1862 */
Tuong Lienc0b14a082019-05-02 17:23:23 +07001863 tnl->drop_point = 1;
1864 tnl->failover_reasm_skb = NULL;
1865
1866 /* Initiate the link's failover deferdq */
1867 if (unlikely(!skb_queue_empty(fdefq))) {
1868 pr_warn("Link failover deferdq not empty: %d!\n",
1869 skb_queue_len(fdefq));
1870 __skb_queue_purge(fdefq);
1871 }
1872}
1873
Jon Maloy7ea817f2018-07-10 01:07:36 +02001874/* tipc_link_validate_msg(): validate message against current link state
1875 * Returns true if message should be accepted, otherwise false
1876 */
1877bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
1878{
1879 u16 curr_session = l->peer_session;
1880 u16 session = msg_session(hdr);
1881 int mtyp = msg_type(hdr);
1882
1883 if (msg_user(hdr) != LINK_PROTOCOL)
1884 return true;
1885
1886 switch (mtyp) {
1887 case RESET_MSG:
1888 if (!l->in_session)
1889 return true;
1890 /* Accept only RESET with new session number */
1891 return more(session, curr_session);
1892 case ACTIVATE_MSG:
1893 if (!l->in_session)
1894 return true;
1895 /* Accept only ACTIVATE with new or current session number */
1896 return !less(session, curr_session);
1897 case STATE_MSG:
1898 /* Accept only STATE with current session number */
1899 if (!l->in_session)
1900 return false;
1901 if (session != curr_session)
1902 return false;
LUU Duc Canhd949cfe2018-09-26 22:28:52 +02001903 /* Extra sanity check */
1904 if (!link_is_up(l) && msg_ack(hdr))
1905 return false;
Jon Maloy7ea817f2018-07-10 01:07:36 +02001906 if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
1907 return true;
1908 /* Accept only STATE with new sequence number */
1909 return !less(msg_seqno(hdr), l->rcv_nxt_state);
1910 default:
1911 return false;
1912 }
1913}
1914
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001915/* tipc_link_proto_rcv(): receive link level protocol message :
1916 * Note that network plane id propagates through the network, and may
1917 * change at any time. The node with lowest numerical id determines
1918 * network plane
1919 */
1920static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1921 struct sk_buff_head *xmitq)
1922{
1923 struct tipc_msg *hdr = buf_msg(skb);
Tuong Lien91959482019-04-04 11:09:51 +07001924 struct tipc_gap_ack_blks *ga = NULL;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001925 u16 rcvgap = 0;
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04001926 u16 ack = msg_ack(hdr);
1927 u16 gap = msg_seq_gap(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001928 u16 peers_snd_nxt = msg_next_sent(hdr);
1929 u16 peers_tol = msg_link_tolerance(hdr);
1930 u16 peers_prio = msg_linkprio(hdr);
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001931 u16 rcv_nxt = l->rcv_nxt;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001932 u16 dlen = msg_data_sz(hdr);
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001933 int mtyp = msg_type(hdr);
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001934 bool reply = msg_probe(hdr);
Tuong Lien91959482019-04-04 11:09:51 +07001935 u16 glen = 0;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001936 void *data;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001937 char *if_name;
1938 int rc = 0;
1939
Tuong Lien26574db2018-12-19 09:17:57 +07001940 trace_tipc_proto_rcv(skb, false, l->name);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001941 if (tipc_link_is_blocked(l) || !xmitq)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001942 goto exit;
1943
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001944 if (tipc_own_addr(l->net) > msg_prevnode(hdr))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001945 l->net_plane = msg_net_plane(hdr);
1946
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001947 skb_linearize(skb);
1948 hdr = buf_msg(skb);
1949 data = msg_data(hdr);
1950
Tuong Lien26574db2018-12-19 09:17:57 +07001951 if (!tipc_link_validate_msg(l, hdr)) {
1952 trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
1953 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
Jon Maloy7ea817f2018-07-10 01:07:36 +02001954 goto exit;
Tuong Lien26574db2018-12-19 09:17:57 +07001955 }
Jon Maloy7ea817f2018-07-10 01:07:36 +02001956
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001957 switch (mtyp) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001958 case RESET_MSG:
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001959 case ACTIVATE_MSG:
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001960 /* Complete own link name with peer's interface name */
1961 if_name = strrchr(l->name, ':') + 1;
1962 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1963 break;
1964 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1965 break;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001966 strncpy(if_name, data, TIPC_MAX_IF_NAME);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001967
1968 /* Update own tolerance if peer indicates a non-zero value */
Jon Maloy047491e2018-10-10 17:34:01 +02001969 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001970 l->tolerance = peers_tol;
Jon Maloy047491e2018-10-10 17:34:01 +02001971 l->bc_rcvlink->tolerance = peers_tol;
1972 }
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001973 /* Update own priority if peer's priority is higher */
1974 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1975 l->priority = peers_prio;
1976
Jon Maloy7ab412d2018-11-10 17:30:24 -05001977 /* If peer is going down we want full re-establish cycle */
1978 if (msg_peer_stopping(hdr)) {
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001979 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
Jon Maloy7ab412d2018-11-10 17:30:24 -05001980 break;
1981 }
Tuong Lien91986ee2019-02-11 13:29:43 +07001982
1983 /* If this endpoint was re-created while peer was ESTABLISHING
1984 * it doesn't know current session number. Force re-synch.
1985 */
1986 if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
1987 l->session != msg_dest_session(hdr)) {
1988 if (less(l->session, msg_dest_session(hdr)))
1989 l->session = msg_dest_session(hdr) + 1;
1990 break;
1991 }
1992
Jon Maloy7ab412d2018-11-10 17:30:24 -05001993 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1994 if (mtyp == RESET_MSG || !link_is_up(l))
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001995 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1996
1997 /* ACTIVATE_MSG takes up link if it was already locally reset */
Jon Maloy7ab412d2018-11-10 17:30:24 -05001998 if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001999 rc = TIPC_LINK_UP_EVT;
2000
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002001 l->peer_session = msg_session(hdr);
Jon Maloy7ea817f2018-07-10 01:07:36 +02002002 l->in_session = true;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002003 l->peer_bearer_id = msg_bearer_id(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002004 if (l->mtu > msg_max_pkt(hdr))
2005 l->mtu = msg_max_pkt(hdr);
2006 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04002007
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002008 case STATE_MSG:
Jon Maloy9012de52018-07-10 01:07:35 +02002009 l->rcv_nxt_state = msg_seqno(hdr) + 1;
2010
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002011 /* Update own tolerance if peer indicates a non-zero value */
Jon Maloy047491e2018-10-10 17:34:01 +02002012 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002013 l->tolerance = peers_tol;
Jon Maloy047491e2018-10-10 17:34:01 +02002014 l->bc_rcvlink->tolerance = peers_tol;
2015 }
Jon Paul Maloyf7967552016-11-23 21:05:26 -05002016 /* Update own prio if peer indicates a different value */
2017 if ((peers_prio != l->priority) &&
2018 in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
Richard Alpe81729812016-02-01 08:19:57 +01002019 l->priority = peers_prio;
2020 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2021 }
2022
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002023 l->silent_intv_cnt = 0;
2024 l->stats.recv_states++;
2025 if (msg_probe(hdr))
2026 l->stats.recv_probes++;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002027
2028 if (!link_is_up(l)) {
2029 if (l->state == LINK_ESTABLISHING)
2030 rc = TIPC_LINK_UP_EVT;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002031 break;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002032 }
Tuong Lien91959482019-04-04 11:09:51 +07002033
2034 /* Receive Gap ACK blocks from peer if any */
2035 if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
2036 ga = (struct tipc_gap_ack_blks *)data;
2037 glen = ntohs(ga->len);
2038 /* sanity check: if failed, ignore Gap ACK blocks */
2039 if (glen != tipc_gap_ack_blks_sz(ga->gack_cnt))
2040 ga = NULL;
2041 }
2042
2043 tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04002044 &l->mon_state, l->bearer_id);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002045
Jon Paul Maloy662921c2015-07-30 18:24:21 -04002046 /* Send NACK if peer has sent pkts we haven't received yet */
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04002047 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002048 rcvgap = peers_snd_nxt - l->rcv_nxt;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01002049 if (rcvgap || reply)
2050 tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
2051 rcvgap, 0, 0, xmitq);
Tuong Lien91959482019-04-04 11:09:51 +07002052
Tuong Lien6a6b5c82019-06-17 12:15:42 +07002053 rc |= tipc_link_advance_transmq(l, ack, gap, ga, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002054
2055 /* If NACK, retransmit will now start at right position */
Tuong Lien91959482019-04-04 11:09:51 +07002056 if (gap)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002057 l->stats.recv_nacks++;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04002058
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002059 tipc_link_advance_backlog(l, xmitq);
2060 if (unlikely(!skb_queue_empty(&l->wakeupq)))
2061 link_prepare_wakeup(l);
2062 }
2063exit:
2064 kfree_skb(skb);
2065 return rc;
2066}
2067
Jon Paul Maloy52666982015-10-22 08:51:41 -04002068/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
2069 */
2070static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
2071 u16 peers_snd_nxt,
2072 struct sk_buff_head *xmitq)
2073{
2074 struct sk_buff *skb;
2075 struct tipc_msg *hdr;
2076 struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
2077 u16 ack = l->rcv_nxt - 1;
2078 u16 gap_to = peers_snd_nxt - 1;
2079
2080 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05002081 0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002082 if (!skb)
2083 return false;
2084 hdr = buf_msg(skb);
2085 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
2086 msg_set_bcast_ack(hdr, ack);
2087 msg_set_bcgap_after(hdr, ack);
2088 if (dfrd_skb)
2089 gap_to = buf_seqno(dfrd_skb) - 1;
2090 msg_set_bcgap_to(hdr, gap_to);
2091 msg_set_non_seq(hdr, bcast);
2092 __skb_queue_tail(xmitq, skb);
2093 return true;
2094}
2095
2096/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
2097 *
2098 * Give a newly added peer node the sequence number where it should
2099 * start receiving and acking broadcast packets.
2100 */
Wu Fengguang742e0382015-10-24 22:56:01 +08002101static void tipc_link_build_bc_init_msg(struct tipc_link *l,
2102 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04002103{
2104 struct sk_buff_head list;
2105
2106 __skb_queue_head_init(&list);
2107 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2108 return;
Jon Paul Maloy06bd2b12016-10-27 18:51:55 -04002109 msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002110 tipc_link_xmit(l, &list, xmitq);
2111}
2112
2113/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2114 */
2115void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2116{
2117 int mtyp = msg_type(hdr);
2118 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2119
2120 if (link_is_up(l))
2121 return;
2122
2123 if (msg_user(hdr) == BCAST_PROTOCOL) {
2124 l->rcv_nxt = peers_snd_nxt;
2125 l->state = LINK_ESTABLISHED;
2126 return;
2127 }
2128
2129 if (l->peer_caps & TIPC_BCAST_SYNCH)
2130 return;
2131
2132 if (msg_peer_node_is_up(hdr))
2133 return;
2134
2135 /* Compatibility: accept older, less safe initial synch data */
2136 if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2137 l->rcv_nxt = peers_snd_nxt;
2138}
2139
2140/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2141 */
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002142int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2143 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04002144{
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -04002145 struct tipc_link *snd_l = l->bc_sndlink;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002146 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002147 u16 from = msg_bcast_ack(hdr) + 1;
2148 u16 to = from + msg_bc_gap(hdr) - 1;
2149 int rc = 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002150
2151 if (!link_is_up(l))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002152 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002153
2154 if (!msg_peer_node_is_up(hdr))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002155 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002156
Jon Paul Maloy2d18ac42016-07-11 16:08:35 -04002157 /* Open when peer ackowledges our bcast init msg (pkt #1) */
2158 if (msg_ack(hdr))
2159 l->bc_peer_is_up = true;
2160
2161 if (!l->bc_peer_is_up)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002162 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002163
Jon Paul Maloy7c4a54b2016-09-01 13:52:50 -04002164 l->stats.recv_nacks++;
2165
Jon Paul Maloy52666982015-10-22 08:51:41 -04002166 /* Ignore if peers_snd_nxt goes beyond receive window */
2167 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002168 return rc;
2169
Tuong Lien6a6b5c82019-06-17 12:15:42 +07002170 rc = tipc_link_bc_retrans(snd_l, l, from, to, xmitq);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002171
2172 l->snd_nxt = peers_snd_nxt;
2173 if (link_bc_rcv_gap(l))
2174 rc |= TIPC_LINK_SND_STATE;
2175
2176 /* Return now if sender supports nack via STATE messages */
2177 if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2178 return rc;
2179
2180 /* Otherwise, be backwards compatible */
Jon Paul Maloy52666982015-10-22 08:51:41 -04002181
2182 if (!more(peers_snd_nxt, l->rcv_nxt)) {
2183 l->nack_state = BC_NACK_SND_CONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002184 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002185 }
2186
2187 /* Don't NACK if one was recently sent or peeked */
2188 if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2189 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002190 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002191 }
2192
2193 /* Conditionally delay NACK sending until next synch rcv */
2194 if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2195 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2196 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002197 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002198 }
2199
2200 /* Send NACK now but suppress next one */
2201 tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2202 l->nack_state = BC_NACK_SND_SUPPRESS;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002203 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002204}
2205
2206void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
2207 struct sk_buff_head *xmitq)
2208{
2209 struct sk_buff *skb, *tmp;
2210 struct tipc_link *snd_l = l->bc_sndlink;
2211
2212 if (!link_is_up(l) || !l->bc_peer_is_up)
2213 return;
2214
2215 if (!more(acked, l->acked))
2216 return;
2217
Tuong Lien26574db2018-12-19 09:17:57 +07002218 trace_tipc_link_bc_ack(l, l->acked, acked, &snd_l->transmq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002219 /* Skip over packets peer has already acked */
2220 skb_queue_walk(&snd_l->transmq, skb) {
2221 if (more(buf_seqno(skb), l->acked))
2222 break;
2223 }
2224
2225 /* Update/release the packets peer is acking now */
2226 skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
2227 if (more(buf_seqno(skb), acked))
2228 break;
2229 if (!--TIPC_SKB_CB(skb)->ackers) {
2230 __skb_unlink(skb, &snd_l->transmq);
2231 kfree_skb(skb);
2232 }
2233 }
2234 l->acked = acked;
2235 tipc_link_advance_backlog(snd_l, xmitq);
2236 if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
2237 link_prepare_wakeup(snd_l);
2238}
2239
2240/* tipc_link_bc_nack_rcv(): receive broadcast nack message
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002241 * This function is here for backwards compatibility, since
2242 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
Jon Paul Maloy52666982015-10-22 08:51:41 -04002243 */
2244int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
2245 struct sk_buff_head *xmitq)
2246{
2247 struct tipc_msg *hdr = buf_msg(skb);
2248 u32 dnode = msg_destnode(hdr);
2249 int mtyp = msg_type(hdr);
2250 u16 acked = msg_bcast_ack(hdr);
2251 u16 from = acked + 1;
2252 u16 to = msg_bcgap_to(hdr);
2253 u16 peers_snd_nxt = to + 1;
2254 int rc = 0;
2255
2256 kfree_skb(skb);
2257
2258 if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
2259 return 0;
2260
2261 if (mtyp != STATE_MSG)
2262 return 0;
2263
Jon Paul Maloye74a3862016-03-03 14:23:21 -05002264 if (dnode == tipc_own_addr(l->net)) {
Jon Paul Maloy52666982015-10-22 08:51:41 -04002265 tipc_link_bc_ack_rcv(l, acked, xmitq);
Tuong Lien6a6b5c82019-06-17 12:15:42 +07002266 rc = tipc_link_bc_retrans(l->bc_sndlink, l, from, to, xmitq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002267 l->stats.recv_nacks++;
2268 return rc;
2269 }
2270
2271 /* Msg for other node => suppress own NACK at next sync if applicable */
2272 if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
2273 l->nack_state = BC_NACK_SND_SUPPRESS;
2274
2275 return 0;
2276}
2277
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04002278void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002279{
Jon Maloy218527f2018-03-29 23:20:41 +02002280 int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04002281
Jon Paul Maloye3eea1e2015-03-13 16:08:11 -04002282 l->window = win;
Jon Paul Maloy5a0950c2016-08-16 11:53:51 -04002283 l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win);
2284 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2);
2285 l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3);
2286 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04002287 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002288}
2289
Allan Stephens5c216e12011-10-18 11:34:29 -04002290/**
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002291 * link_reset_stats - reset link statistics
Jon Paul Maloy1a906322015-11-19 14:30:47 -05002292 * @l: pointer to link
Per Lidenb97bf3f2006-01-02 19:04:38 +01002293 */
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002294void tipc_link_reset_stats(struct tipc_link *l)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002295{
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002296 memset(&l->stats, 0, sizeof(l->stats));
Per Lidenb97bf3f2006-01-02 19:04:38 +01002297}
2298
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002299static void link_print(struct tipc_link *l, const char *str)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002300{
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002301 struct sk_buff *hskb = skb_peek(&l->transmq);
Jon Paul Maloyc1ab3f1d2015-10-22 08:51:38 -04002302 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002303 u16 tail = l->snd_nxt - 1;
Ying Xue7a2f7d12014-04-21 10:55:46 +08002304
Jon Paul Maloy662921c2015-07-30 18:24:21 -04002305 pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
Jon Paul Maloy1a20cc22015-07-16 16:54:30 -04002306 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
2307 skb_queue_len(&l->transmq), head, tail,
2308 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002309}
Richard Alpe0655f6a2014-11-20 10:29:07 +01002310
2311/* Parse and validate nested (link) properties valid for media, bearer and link
2312 */
2313int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2314{
2315 int err;
2316
Johannes Berg8cb08172019-04-26 14:07:28 +02002317 err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
2318 tipc_nl_prop_policy, NULL);
Richard Alpe0655f6a2014-11-20 10:29:07 +01002319 if (err)
2320 return err;
2321
2322 if (props[TIPC_NLA_PROP_PRIO]) {
2323 u32 prio;
2324
2325 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2326 if (prio > TIPC_MAX_LINK_PRI)
2327 return -EINVAL;
2328 }
2329
2330 if (props[TIPC_NLA_PROP_TOL]) {
2331 u32 tol;
2332
2333 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2334 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2335 return -EINVAL;
2336 }
2337
2338 if (props[TIPC_NLA_PROP_WIN]) {
2339 u32 win;
2340
2341 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2342 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
2343 return -EINVAL;
2344 }
2345
2346 return 0;
2347}
Richard Alpe7be57fc2014-11-20 10:29:12 +01002348
Richard Alped8182802014-11-24 11:10:29 +01002349static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002350{
2351 int i;
2352 struct nlattr *stats;
2353
2354 struct nla_map {
2355 u32 key;
2356 u32 val;
2357 };
2358
2359 struct nla_map map[] = {
Jon Paul Maloy95901122016-11-25 10:35:02 -05002360 {TIPC_NLA_STATS_RX_INFO, 0},
Richard Alpe7be57fc2014-11-20 10:29:12 +01002361 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2362 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2363 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2364 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
Jon Paul Maloy95901122016-11-25 10:35:02 -05002365 {TIPC_NLA_STATS_TX_INFO, 0},
Richard Alpe7be57fc2014-11-20 10:29:12 +01002366 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2367 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2368 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2369 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2370 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2371 s->msg_length_counts : 1},
2372 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2373 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2374 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2375 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2376 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2377 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2378 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2379 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2380 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2381 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
2382 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2383 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2384 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2385 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
2386 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2387 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2388 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2389 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2390 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2391 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2392 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2393 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2394 (s->accu_queue_sz / s->queue_sz_counts) : 0}
2395 };
2396
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002397 stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002398 if (!stats)
2399 return -EMSGSIZE;
2400
2401 for (i = 0; i < ARRAY_SIZE(map); i++)
2402 if (nla_put_u32(skb, map[i].key, map[i].val))
2403 goto msg_full;
2404
2405 nla_nest_end(skb, stats);
2406
2407 return 0;
2408msg_full:
2409 nla_nest_cancel(skb, stats);
2410
2411 return -EMSGSIZE;
2412}
2413
2414/* Caller should hold appropriate locks to protect the link */
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05002415int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2416 struct tipc_link *link, int nlflags)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002417{
Jon Maloy23fd3ea2018-03-22 20:42:49 +01002418 u32 self = tipc_own_addr(net);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002419 struct nlattr *attrs;
2420 struct nlattr *prop;
Jon Maloy23fd3ea2018-03-22 20:42:49 +01002421 void *hdr;
2422 int err;
Richard Alpe7be57fc2014-11-20 10:29:12 +01002423
Richard Alpebfb3e5d2015-02-09 09:50:03 +01002424 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
Nicolas Dichtelf2f67392015-04-28 18:33:50 +02002425 nlflags, TIPC_NL_LINK_GET);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002426 if (!hdr)
2427 return -EMSGSIZE;
2428
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002429 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002430 if (!attrs)
2431 goto msg_full;
2432
2433 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2434 goto attr_msg_full;
Jon Maloy23fd3ea2018-03-22 20:42:49 +01002435 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002436 goto attr_msg_full;
Jon Paul Maloyed193ec2015-04-02 09:33:02 -04002437 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002438 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002439 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002440 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002441 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002442 goto attr_msg_full;
2443
2444 if (tipc_link_is_up(link))
2445 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2446 goto attr_msg_full;
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04002447 if (link->active)
Richard Alpe7be57fc2014-11-20 10:29:12 +01002448 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2449 goto attr_msg_full;
2450
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002451 prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
Richard Alpe7be57fc2014-11-20 10:29:12 +01002452 if (!prop)
2453 goto attr_msg_full;
2454 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2455 goto prop_msg_full;
2456 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2457 goto prop_msg_full;
2458 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
Jon Paul Maloy1f66d162015-03-25 12:07:24 -04002459 link->window))
Richard Alpe7be57fc2014-11-20 10:29:12 +01002460 goto prop_msg_full;
2461 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2462 goto prop_msg_full;
2463 nla_nest_end(msg->skb, prop);
2464
2465 err = __tipc_nl_add_stats(msg->skb, &link->stats);
2466 if (err)
2467 goto attr_msg_full;
2468
2469 nla_nest_end(msg->skb, attrs);
2470 genlmsg_end(msg->skb, hdr);
2471
2472 return 0;
2473
2474prop_msg_full:
2475 nla_nest_cancel(msg->skb, prop);
2476attr_msg_full:
2477 nla_nest_cancel(msg->skb, attrs);
2478msg_full:
2479 genlmsg_cancel(msg->skb, hdr);
2480
2481 return -EMSGSIZE;
2482}
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002483
2484static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
2485 struct tipc_stats *stats)
2486{
2487 int i;
2488 struct nlattr *nest;
2489
2490 struct nla_map {
2491 __u32 key;
2492 __u32 val;
2493 };
2494
2495 struct nla_map map[] = {
Jon Paul Maloy95901122016-11-25 10:35:02 -05002496 {TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002497 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2498 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2499 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2500 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
Jon Paul Maloy95901122016-11-25 10:35:02 -05002501 {TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002502 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2503 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2504 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2505 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2506 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2507 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2508 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2509 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2510 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2511 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2512 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2513 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2514 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2515 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2516 };
2517
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002518 nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002519 if (!nest)
2520 return -EMSGSIZE;
2521
2522 for (i = 0; i < ARRAY_SIZE(map); i++)
2523 if (nla_put_u32(skb, map[i].key, map[i].val))
2524 goto msg_full;
2525
2526 nla_nest_end(skb, nest);
2527
2528 return 0;
2529msg_full:
2530 nla_nest_cancel(skb, nest);
2531
2532 return -EMSGSIZE;
2533}
2534
2535int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2536{
2537 int err;
2538 void *hdr;
2539 struct nlattr *attrs;
2540 struct nlattr *prop;
2541 struct tipc_net *tn = net_generic(net, tipc_net_id);
Hoang Le02ec6ca2019-03-19 18:49:48 +07002542 u32 bc_mode = tipc_bcast_get_broadcast_mode(net);
2543 u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002544 struct tipc_link *bcl = tn->bcl;
2545
2546 if (!bcl)
2547 return 0;
2548
2549 tipc_bcast_lock(net);
2550
2551 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2552 NLM_F_MULTI, TIPC_NL_LINK_GET);
Insu Yunb53ce3e2016-02-17 11:47:35 -05002553 if (!hdr) {
2554 tipc_bcast_unlock(net);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002555 return -EMSGSIZE;
Insu Yunb53ce3e2016-02-17 11:47:35 -05002556 }
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002557
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002558 attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002559 if (!attrs)
2560 goto msg_full;
2561
2562 /* The broadcast link is always up */
2563 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2564 goto attr_msg_full;
2565
2566 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2567 goto attr_msg_full;
2568 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2569 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002570 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002571 goto attr_msg_full;
Jon Paul Maloy95901122016-11-25 10:35:02 -05002572 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002573 goto attr_msg_full;
2574
Michal Kubecekae0be8d2019-04-26 11:13:06 +02002575 prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002576 if (!prop)
2577 goto attr_msg_full;
2578 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2579 goto prop_msg_full;
Hoang Le02ec6ca2019-03-19 18:49:48 +07002580 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
2581 goto prop_msg_full;
2582 if (bc_mode & BCLINK_MODE_SEL)
2583 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
2584 bc_ratio))
2585 goto prop_msg_full;
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002586 nla_nest_end(msg->skb, prop);
2587
2588 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2589 if (err)
2590 goto attr_msg_full;
2591
2592 tipc_bcast_unlock(net);
2593 nla_nest_end(msg->skb, attrs);
2594 genlmsg_end(msg->skb, hdr);
2595
2596 return 0;
2597
2598prop_msg_full:
2599 nla_nest_cancel(msg->skb, prop);
2600attr_msg_full:
2601 nla_nest_cancel(msg->skb, attrs);
2602msg_full:
2603 tipc_bcast_unlock(net);
2604 genlmsg_cancel(msg->skb, hdr);
2605
2606 return -EMSGSIZE;
2607}
2608
Richard Alped01332f2016-02-01 08:19:56 +01002609void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2610 struct sk_buff_head *xmitq)
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002611{
2612 l->tolerance = tol;
Jon Maloy047491e2018-10-10 17:34:01 +02002613 if (l->bc_rcvlink)
2614 l->bc_rcvlink->tolerance = tol;
Jon Maloy37c64cf2018-02-14 13:34:39 +01002615 if (link_is_up(l))
2616 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002617}
2618
Richard Alped01332f2016-02-01 08:19:56 +01002619void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2620 struct sk_buff_head *xmitq)
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002621{
2622 l->priority = prio;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01002623 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002624}
2625
2626void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2627{
2628 l->abort_limit = limit;
2629}
Tuong Lienb4b97712018-12-19 09:17:56 +07002630
2631char *tipc_link_name_ext(struct tipc_link *l, char *buf)
2632{
2633 if (!l)
2634 scnprintf(buf, TIPC_MAX_LINK_NAME, "null");
2635 else if (link_is_bc_sndlink(l))
2636 scnprintf(buf, TIPC_MAX_LINK_NAME, "broadcast-sender");
2637 else if (link_is_bc_rcvlink(l))
2638 scnprintf(buf, TIPC_MAX_LINK_NAME,
2639 "broadcast-receiver, peer %x", l->addr);
2640 else
2641 memcpy(buf, l->name, TIPC_MAX_LINK_NAME);
2642
2643 return buf;
2644}
2645
2646/**
2647 * tipc_link_dump - dump TIPC link data
2648 * @l: tipc link to be dumped
2649 * @dqueues: bitmask to decide if any link queue to be dumped?
2650 * - TIPC_DUMP_NONE: don't dump link queues
2651 * - TIPC_DUMP_TRANSMQ: dump link transmq queue
2652 * - TIPC_DUMP_BACKLOGQ: dump link backlog queue
2653 * - TIPC_DUMP_DEFERDQ: dump link deferd queue
2654 * - TIPC_DUMP_INPUTQ: dump link input queue
2655 * - TIPC_DUMP_WAKEUP: dump link wakeup queue
2656 * - TIPC_DUMP_ALL: dump all the link queues above
2657 * @buf: returned buffer of dump data in format
2658 */
2659int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
2660{
2661 int i = 0;
2662 size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
2663 struct sk_buff_head *list;
2664 struct sk_buff *hskb, *tskb;
2665 u32 len;
2666
2667 if (!l) {
2668 i += scnprintf(buf, sz, "link data: (null)\n");
2669 return i;
2670 }
2671
2672 i += scnprintf(buf, sz, "link data: %x", l->addr);
2673 i += scnprintf(buf + i, sz - i, " %x", l->state);
2674 i += scnprintf(buf + i, sz - i, " %u", l->in_session);
2675 i += scnprintf(buf + i, sz - i, " %u", l->session);
2676 i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
2677 i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
2678 i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
2679 i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
2680 i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
2681 i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
2682 i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
2683 i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
2684 i += scnprintf(buf + i, sz - i, " %u", l->prev_from);
Jon Maloy77cf8ed2019-06-25 17:36:43 +02002685 i += scnprintf(buf + i, sz - i, " %u", 0);
Tuong Lienb4b97712018-12-19 09:17:56 +07002686 i += scnprintf(buf + i, sz - i, " %u", l->acked);
2687
2688 list = &l->transmq;
2689 len = skb_queue_len(list);
2690 hskb = skb_peek(list);
2691 tskb = skb_peek_tail(list);
2692 i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2693 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2694 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2695
2696 list = &l->deferdq;
2697 len = skb_queue_len(list);
2698 hskb = skb_peek(list);
2699 tskb = skb_peek_tail(list);
2700 i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2701 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2702 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2703
2704 list = &l->backlogq;
2705 len = skb_queue_len(list);
2706 hskb = skb_peek(list);
2707 tskb = skb_peek_tail(list);
2708 i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
2709 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2710 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2711
2712 list = l->inputq;
2713 len = skb_queue_len(list);
2714 hskb = skb_peek(list);
2715 tskb = skb_peek_tail(list);
2716 i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
2717 (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
2718 (tskb) ? msg_seqno(buf_msg(tskb)) : 0);
2719
2720 if (dqueues & TIPC_DUMP_TRANSMQ) {
2721 i += scnprintf(buf + i, sz - i, "transmq: ");
2722 i += tipc_list_dump(&l->transmq, false, buf + i);
2723 }
2724 if (dqueues & TIPC_DUMP_BACKLOGQ) {
2725 i += scnprintf(buf + i, sz - i,
2726 "backlogq: <%u %u %u %u %u>, ",
2727 l->backlog[TIPC_LOW_IMPORTANCE].len,
2728 l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
2729 l->backlog[TIPC_HIGH_IMPORTANCE].len,
2730 l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
2731 l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
2732 i += tipc_list_dump(&l->backlogq, false, buf + i);
2733 }
2734 if (dqueues & TIPC_DUMP_DEFERDQ) {
2735 i += scnprintf(buf + i, sz - i, "deferdq: ");
2736 i += tipc_list_dump(&l->deferdq, false, buf + i);
2737 }
2738 if (dqueues & TIPC_DUMP_INPUTQ) {
2739 i += scnprintf(buf + i, sz - i, "inputq: ");
2740 i += tipc_list_dump(l->inputq, false, buf + i);
2741 }
2742 if (dqueues & TIPC_DUMP_WAKEUP) {
2743 i += scnprintf(buf + i, sz - i, "wakeup: ");
2744 i += tipc_list_dump(&l->wakeupq, false, buf + i);
2745 }
2746
2747 return i;
2748}