/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"
#include "trace.h"
#include "crypto.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_pkts;
	u32 recv_pkts;
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue of OOS broadcast messages received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u16 peer_session;
	u16 session;
	u16 snd_nxt_state;
	u16 rcv_nxt_state;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool in_session;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;
	struct sk_buff_head failover_deferdq;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
		struct sk_buff *target_bskb;
	} backlog[5];
	u16 snd_nxt;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;
	u16 window;
	u16 min_win;
	u16 ssthresh;
	u16 max_win;
	u16 cong_acks;
	u16 checkpoint;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;
	struct sk_buff *reasm_tnlmsg;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	u16 last_gap;
	struct tipc_gap_ack_blks *last_ga;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	u8 nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIM  (jiffies + msecs_to_jiffies(10))
#define TIPC_UC_RETR_TIME (jiffies + msecs_to_jiffies(1))

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED  = 0xe,
	LINK_ESTABLISHING = 0xe << 4,
	LINK_RESET        = 0x1 << 8,
	LINK_RESETTING    = 0x2 << 12,
	LINK_PEER_RESET   = 0xd << 16,
	LINK_FAILINGOVER  = 0xf << 20,
	LINK_SYNCHING     = 0xc << 24
};
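
/* Each state above occupies its own 4-bit field within the 32-bit state
 * word, so a link is always in exactly one state, and membership in a
 * set of states can be tested with a single mask operation, as done in
 * link_is_up() and tipc_link_is_reset() below.
 */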

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
				    struct tipc_link *l, u8 start_index);
static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr);
static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
				     u16 acked, u16 gap,
				     struct tipc_gap_ack_blks *ga,
				     struct sk_buff_head *xmitq,
				     bool *retransmitted, int *rc);
static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted);
/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_min_win(struct tipc_link *l)
{
	return l->min_win;
}

int tipc_link_max_win(struct tipc_link *l)
{
	return l->max_win;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_update_caps(struct tipc_link *l, u16 capabilities)
{
	l->peer_caps = capabilities;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, 0, NULL, xmitq, NULL);
	trace_tipc_link_reset(rcv_l, TIPC_DUMP_ALL, "bclink removed!");
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		trace_tipc_link_reset(snd_l, TIPC_DUMP_ALL, "zero ackers!");
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}
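
/* Example: with rcv_nxt = 10 and the peer's snd_nxt = 15, the provisional
 * gap is 5; if a deferred buffer with seqno 13 sits at the head of deferdq,
 * the gap is narrowed to 13 - 10 = 3, since the deferred queue head gives
 * the more precise bound on what is actually missing.
 */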

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

int tipc_link_mss(struct tipc_link *l)
{
#ifdef CONFIG_TIPC_CRYPTO
	return l->mtu - INT_H_SIZE - EMSG_OVERHEAD;
#else
	return l->mtu - INT_H_SIZE;
#endif
}
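
/* The mss value is the maximum payload per packet: link mtu minus the
 * internal header (INT_H_SIZE) and, with CONFIG_TIPC_CRYPTO enabled, the
 * per-message encryption overhead (EMSG_OVERHEAD). It is what
 * tipc_link_xmit() feeds to the bundling logic below.
 */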

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

u32 tipc_link_state(struct tipc_link *l)
{
	return l->state;
}

/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,C..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @session: session to be used by link
 * @self: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      u32 min_win, u32 max_win, u32 session, u32 self,
		      u32 peer, u8 *peer_id, u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	char peer_str[NODE_ID_STR_LEN] = {0,};
	char self_str[NODE_ID_STR_LEN] = {0,};
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Set link name for unicast links only */
	if (peer_id) {
		tipc_nodeid2string(self_str, tipc_own_id(net));
		if (strlen(self_str) > 16)
			sprintf(self_str, "%x", self);
		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
	}
	/* Peer i/f name will be completed by reset/activate message */
	snprintf(l->name, sizeof(l->name), "%s:%s-%s:unknown",
		 self_str, if_name, peer_str);

	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->in_session = false;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	if (bc_rcvlink)
		bc_rcvlink->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, min_win, max_win);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	__skb_queue_head_init(&l->failover_deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}
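
/* Note: when @peer_id is NULL, as for the broadcast links created via
 * tipc_link_bc_create() below, the name composed above is only a
 * placeholder and is overwritten by the caller after creation.
 */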

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @mtu: mtu to be used initially if no peers
 * @min_win: minimal send window to be used by link
 * @max_win: maximal send window to be used by link
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer, u8 *peer_id,
			 int mtu, u32 min_win, u32 max_win, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, min_win,
			      max_win, 0, ownnode, peer, NULL, peer_caps,
			      bc_sndlink, NULL, inputq, namedq, link))
		return false;

	l = *link;
	if (peer_id) {
		char peer_str[NODE_ID_STR_LEN] = {0,};

		tipc_nodeid2string(peer_str, peer_id);
		if (strlen(peer_str) > 16)
			sprintf(peer_str, "%x", peer);
		/* Broadcast receiver link name: "broadcast-link:<peer>" */
		snprintf(l->name, sizeof(l->name), "%s:%s", tipc_bclink_name,
			 peer_str);
	} else {
		strcpy(l->name, tipc_bclink_name);
	}
	trace_tipc_link_reset(l, TIPC_DUMP_ALL, "bclink created!");
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_toggle_rcast(net, false);

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;
	int old_state = l->state;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			/* fall through */
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	trace_tipc_link_fsm(l->name, old_state, l->state, evt);
	return rc;
}

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_inner_hdr(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}

/**
 * tipc_link_too_silent - check if link is "too silent"
 * @l: tipc link to be checked
 *
 * Returns true if the link 'silent_intv_cnt' is about to reach the
 * 'abort_limit' value, otherwise false
 */
bool tipc_link_too_silent(struct tipc_link *l)
{
	return (l->silent_intv_cnt + 2 > l->abort_limit);
}

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	trace_tipc_link_timeout(l, TIPC_DUMP_NONE, " ");
	trace_tipc_link_too_silent(l, TIPC_DUMP_ALL, " ");
	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		state |= !skb_queue_empty(&l->deferdq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		if (l->snd_nxt == l->checkpoint) {
			tipc_link_update_cwin(l, 0, 0);
			probe = true;
		}
		l->checkpoint = l->snd_nxt;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}
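
/* Timer-driven congestion check: if snd_nxt has not advanced since the
 * previous timer tick (l->checkpoint), the congestion window is
 * re-evaluated via tipc_link_update_cwin() and an extra probe is forced,
 * so an idle or stalled link does not keep an inflated window forever.
 */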

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	trace_tipc_link_conges(l, TIPC_DUMP_ALL, "wakeup scheduled!");
	return -ELINKCONG;
}
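
/* The SOCK_WAKEUP message created above is addressed back to the
 * originating port and parked on l->wakeupq; link_prepare_wakeup()
 * later moves it to l->inputq, from where it is delivered upwards to
 * wake up the blocked sender once the backlog has drained.
 */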

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
static void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff_head *wakeupq = &l->wakeupq;
	struct sk_buff_head *inputq = l->inputq;
	struct sk_buff *skb, *tmp;
	struct sk_buff_head tmpq;
	int avail[5] = {0,};
	int imp = 0;

	__skb_queue_head_init(&tmpq);

	for (; imp <= TIPC_SYSTEM_IMPORTANCE; imp++)
		avail[imp] = l->backlog[imp].limit - l->backlog[imp].len;

	skb_queue_walk_safe(wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (avail[imp] <= 0)
			continue;
		avail[imp]--;
		__skb_unlink(skb, wakeupq);
		__skb_queue_tail(&tmpq, skb);
	}

	spin_lock_bh(&inputq->lock);
	skb_queue_splice_tail(&tmpq, inputq);
	spin_unlock_bh(&inputq->lock);
}
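
/* Wakeups are budgeted per importance level: each level may only wake
 * as many senders as there are free slots left in its own backlog queue
 * (limit - len), so one importance level cannot consume the headroom
 * of another.
 */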

void tipc_link_reset(struct tipc_link *l)
{
	struct sk_buff_head list;
	u32 imp;

	__skb_queue_head_init(&list);

	l->in_session = false;
	/* Force re-synch of peer session number before establishing */
	l->peer_session--;
	l->session++;
	l->mtu = l->advertised_mtu;

	spin_lock_bh(&l->wakeupq.lock);
	skb_queue_splice_init(&l->wakeupq, &list);
	spin_unlock_bh(&l->wakeupq.lock);

	spin_lock_bh(&l->inputq->lock);
	skb_queue_splice_init(&list, l->inputq);
	spin_unlock_bh(&l->inputq->lock);

	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	__skb_queue_purge(&l->backlogq);
	__skb_queue_purge(&l->failover_deferdq);
	for (imp = 0; imp <= TIPC_SYSTEM_IMPORTANCE; imp++) {
		l->backlog[imp].len = 0;
		l->backlog[imp].target_bskb = NULL;
	}
	kfree_skb(l->reasm_buf);
	kfree_skb(l->reasm_tnlmsg);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->reasm_tnlmsg = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->snd_nxt_state = 1;
	l->rcv_nxt_state = 1;
	l->acked = 0;
	l->last_gap = 0;
	kfree(l->last_ga);
	l->last_ga = NULL;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}
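
/* Note that a reset empties all queues and re-initializes the sequence
 * number space, but deliberately leaves the negotiated link parameters
 * (priority, tolerance, window limits) intact, and pending wakeup
 * messages are handed back to the sockets via l->inputq rather than
 * being dropped.
 */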

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @link: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	int pkt_cnt = skb_queue_len(list);
	int imp = msg_importance(hdr);
	unsigned int mss = tipc_link_mss(l);
	unsigned int cwin = l->window;
	unsigned int mtu = l->mtu;
	bool new_bundle;
	int rc = 0;

	if (unlikely(msg_size(hdr) > mtu)) {
		pr_warn("Too large msg, purging xmit list %d %d %d %d %d!\n",
			skb_queue_len(list), msg_user(hdr),
			msg_type(hdr), msg_size(hdr), mtu);
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while ((skb = __skb_dequeue(list))) {
		if (likely(skb_queue_len(transmq) < cwin)) {
			hdr = buf_msg(skb);
			msg_set_seqno(hdr, seqno);
			msg_set_ack(hdr, ack);
			msg_set_bcast_ack(hdr, bc_ack);
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				kfree_skb(skb);
				__skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_queue_tail(transmq, skb);
			/* next retransmit attempt */
			if (link_is_bc_sndlink(l))
				TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_try_bundle(l->backlog[imp].target_bskb, &skb,
					mss, l->addr, &new_bundle)) {
			if (skb) {
				/* Keep a ref. to the skb for next try */
				l->backlog[imp].target_bskb = skb;
				l->backlog[imp].len++;
				__skb_queue_tail(backlogq, skb);
			} else {
				if (new_bundle) {
					l->stats.sent_bundles++;
					l->stats.sent_bundled++;
				}
				l->stats.sent_bundled++;
			}
			continue;
		}
		l->backlog[imp].target_bskb = NULL;
		l->backlog[imp].len += (1 + skb_queue_len(list));
		__skb_queue_tail(backlogq, skb);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}
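
/* Rough calling pattern (a sketch only; the real callers live in node.c
 * and bcast.c and run under the appropriate node/link locks):
 *
 *	struct sk_buff_head pkts, xmitq;
 *
 *	__skb_queue_head_init(&pkts);	// chain built by tipc_msg_build()
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_xmit(l, &pkts, &xmitq);
 *	// clones placed on xmitq are then handed to the bearer, e.g. via
 *	// tipc_bearer_xmit(); -ELINKCONG means the sender was queued for
 *	// a SOCK_WAKEUP once congestion abates
 */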

static void tipc_link_update_cwin(struct tipc_link *l, int released,
				  bool retransmitted)
{
	int bklog_len = skb_queue_len(&l->backlogq);
	struct sk_buff_head *txq = &l->transmq;
	int txq_len = skb_queue_len(txq);
	u16 cwin = l->window;

	/* Enter fast recovery */
	if (unlikely(retransmitted)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = min_t(u16, l->ssthresh, l->window);
		return;
	}
	/* Enter slow start */
	if (unlikely(!released)) {
		l->ssthresh = max_t(u16, l->window / 2, 300);
		l->window = l->min_win;
		return;
	}
	/* Don't increase window if no pressure on the transmit queue */
	if (txq_len + bklog_len < cwin)
		return;

	/* Don't increase window if there are holes in the transmit queue */
	if (txq_len && l->snd_nxt - buf_seqno(skb_peek(txq)) != txq_len)
		return;

	l->cong_acks += released;

	/* Slow start */
	if (cwin <= l->ssthresh) {
		l->window = min_t(u16, cwin + released, l->max_win);
		return;
	}
	/* Congestion avoidance */
	if (l->cong_acks < cwin)
		return;
	l->window = min_t(u16, ++cwin, l->max_win);
	l->cong_acks = 0;
}

static void tipc_link_advance_backlog(struct tipc_link *l,
				      struct sk_buff_head *xmitq)
{
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *txq = &l->transmq;
	struct sk_buff *skb, *_skb;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	struct tipc_msg *hdr;
	u16 cwin = l->window;
	u32 imp;

	while (skb_queue_len(txq) < cwin) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		imp = msg_importance(hdr);
		l->backlog[imp].len--;
		if (unlikely(skb == l->backlog[imp].target_bskb))
			l->backlog[imp].target_bskb = NULL;
		__skb_queue_tail(&l->transmq, skb);
		/* next retransmit attempt */
		if (link_is_bc_sndlink(l))
			TIPC_SKB_CB(skb)->nxt_retr = TIPC_BC_RETR_LIM;

		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}

/**
 * link_retransmit_failure() - Detect repeated retransmit failures
 * @l: tipc link sender
 * @r: tipc link receiver (= l in case of unicast)
 * @rc: returned code
 *
 * Return: true if repeated retransmit failures have happened, otherwise
 * false
 */
static bool link_retransmit_failure(struct tipc_link *l, struct tipc_link *r,
				    int *rc)
{
	struct sk_buff *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;

	if (!skb)
		return false;

	if (!TIPC_SKB_CB(skb)->retr_cnt)
		return false;

	if (!time_after(jiffies, TIPC_SKB_CB(skb)->retr_stamp +
			msecs_to_jiffies(r->tolerance * 10)))
		return false;

	hdr = buf_msg(skb);
	if (link_is_bc_sndlink(l) && !less(r->acked, msg_seqno(hdr)))
		return false;

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, dest: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_destnode(hdr));
	pr_info("retr_stamp %d, retr_cnt %d\n",
		jiffies_to_msecs(TIPC_SKB_CB(skb)->retr_stamp),
		TIPC_SKB_CB(skb)->retr_cnt);

	trace_tipc_list_dump(&l->transmq, true, "retrans failure!");
	trace_tipc_link_dump(l, TIPC_DUMP_NONE, "retrans failure!");
	trace_tipc_link_dump(r, TIPC_DUMP_NONE, "retrans failure!");

	if (link_is_bc_sndlink(l)) {
		r->state = LINK_RESET;
		*rc |= TIPC_LINK_DOWN_EVT;
	} else {
		*rc |= tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	return true;
}
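
/* A failure is only declared when the packet at the head of transmq has
 * already been retransmitted at least once and has then made no progress
 * for ten times the link tolerance; for broadcast, no failure is declared
 * if the peer has in fact already acknowledged that packet.
 */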
1210
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001211/* tipc_data_input - deliver data and name distr msgs to upper layer
Erik Hugne7ae934b2014-07-01 10:22:40 +02001212 *
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001213 * Consumes buffer if message is of right type
Erik Hugne7ae934b2014-07-01 10:22:40 +02001214 * Node lock must be held
1215 */
Jon Paul Maloy52666982015-10-22 08:51:41 -04001216static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001217 struct sk_buff_head *inputq)
Erik Hugne7ae934b2014-07-01 10:22:40 +02001218{
Jon Maloy399574d2017-10-13 11:04:32 +02001219 struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
Jon Paul Maloya853e4c2017-01-18 13:50:52 -05001220 struct tipc_msg *hdr = buf_msg(skb);
1221
1222 switch (msg_user(hdr)) {
Erik Hugne7ae934b2014-07-01 10:22:40 +02001223 case TIPC_LOW_IMPORTANCE:
1224 case TIPC_MEDIUM_IMPORTANCE:
1225 case TIPC_HIGH_IMPORTANCE:
1226 case TIPC_CRITICAL_IMPORTANCE:
Jon Maloy2f487712017-10-13 11:04:31 +02001227 if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
Jon Maloy399574d2017-10-13 11:04:32 +02001228 skb_queue_tail(mc_inputq, skb);
Jon Paul Maloya853e4c2017-01-18 13:50:52 -05001229 return true;
1230 }
Gustavo A. R. Silvaf79e3362019-01-23 01:09:31 -06001231 /* fall through */
Jon Maloy2f487712017-10-13 11:04:31 +02001232 case CONN_MANAGER:
Jon Maloy36c0a9d2017-10-16 16:04:51 +02001233 skb_queue_tail(inputq, skb);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001234 return true;
Jon Maloy399574d2017-10-13 11:04:32 +02001235 case GROUP_PROTOCOL:
1236 skb_queue_tail(mc_inputq, skb);
1237 return true;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001238 case NAME_DISTRIBUTOR:
Jon Paul Maloy52666982015-10-22 08:51:41 -04001239 l->bc_rcvlink->state = LINK_ESTABLISHED;
1240 skb_queue_tail(l->namedq, skb);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001241 return true;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001242 case MSG_BUNDLER:
Jon Paul Maloydff29b12015-04-02 09:33:01 -04001243 case TUNNEL_PROTOCOL:
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001244 case MSG_FRAGMENTER:
1245 case BCAST_PROTOCOL:
1246 return false;
1247 default:
1248 pr_warn("Dropping received illegal msg type\n");
1249 kfree_skb(skb);
Hoang Le7384b532019-02-11 09:18:28 +07001250 return true;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001251	}
1252}
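
/* Illustrative sketch, not part of the kernel source: the consume-or-pass
 * contract of tipc_data_input() as a pure function. Message users that map
 * directly to an upper-layer queue are consumed here; bundles, fragments,
 * tunnel and bcast protocol messages fall through to tipc_link_input().
 * The enum values are hypothetical stand-ins for the real msg_user() codes.
 */
#include <stdbool.h>

enum muser_model {
	M_DATA,        /* the four data importance levels */
	M_CONN_MGR, M_GRP_PROTO, M_NAME_DISTR,
	M_BUNDLER, M_FRAGMENTER, M_TUNNEL, M_BCAST_PROTO,
	M_UNKNOWN
};

/* true: buffer consumed (queued or dropped); false: link-level processing */
static bool data_input_consumes(enum muser_model user)
{
	switch (user) {
	case M_DATA:
	case M_CONN_MGR:
	case M_GRP_PROTO:
	case M_NAME_DISTR:
		return true;   /* goes straight to inputq/mc_inputq/namedq */
	case M_BUNDLER:
	case M_FRAGMENTER:
	case M_TUNNEL:
	case M_BCAST_PROTO:
		return false;  /* unwrapped/reassembled by tipc_link_input() */
	default:
		return true;   /* illegal type: dropped, but still consumed */
	}
}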
1253
1254/* tipc_link_input - process packet that has passed link protocol check
1255 *
1256 * Consumes buffer
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001257 */
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001258static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
Tuong Lien58ee86b2019-04-04 11:09:53 +07001259 struct sk_buff_head *inputq,
1260 struct sk_buff **reasm_skb)
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001261{
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001262 struct tipc_msg *hdr = buf_msg(skb);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001263 struct sk_buff *iskb;
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001264 struct sk_buff_head tmpq;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001265 int usr = msg_user(hdr);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001266 int pos = 0;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001267
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001268 if (usr == MSG_BUNDLER) {
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001269 skb_queue_head_init(&tmpq);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001270 l->stats.recv_bundles++;
1271 l->stats.recv_bundled += msg_msgcnt(hdr);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001272 while (tipc_msg_extract(skb, &iskb, &pos))
Jon Paul Maloy9945e802015-10-15 14:52:40 -04001273 tipc_data_input(l, iskb, &tmpq);
1274 tipc_skb_queue_splice_tail(&tmpq, inputq);
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001275 return 0;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001276 } else if (usr == MSG_FRAGMENTER) {
1277 l->stats.recv_fragments++;
1278 if (tipc_buf_append(reasm_skb, &skb)) {
1279 l->stats.recv_fragmented++;
Jon Paul Maloy9073fb82015-07-30 18:24:25 -04001280 tipc_data_input(l, skb, inputq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001281 } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1282 pr_warn_ratelimited("Unable to build fragment list\n");
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001283 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05001284 }
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001285 return 0;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001286 } else if (usr == BCAST_PROTOCOL) {
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04001287 tipc_bcast_lock(l->net);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001288 tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
Jon Paul Maloyc72fa872015-10-22 08:51:46 -04001289 tipc_bcast_unlock(l->net);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001290 }
Tuong Lien58ee86b2019-04-04 11:09:53 +07001291
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001292 kfree_skb(skb);
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001293 return 0;
Erik Hugne7ae934b2014-07-01 10:22:40 +02001294}
1295
Tuong Lien58ee86b2019-04-04 11:09:53 +07001296/* tipc_link_tnl_rcv() - receive TUNNEL_PROTOCOL message, drop or process the
1297 * inner message along with the ones in the old link's
1298 * deferdq
1299 * @l: tunnel link
1300 * @skb: TUNNEL_PROTOCOL message
1301 * @inputq: queue to put messages ready for delivery
1302 */
1303static int tipc_link_tnl_rcv(struct tipc_link *l, struct sk_buff *skb,
1304 struct sk_buff_head *inputq)
1305{
1306 struct sk_buff **reasm_skb = &l->failover_reasm_skb;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001307 struct sk_buff **reasm_tnlmsg = &l->reasm_tnlmsg;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001308 struct sk_buff_head *fdefq = &l->failover_deferdq;
1309 struct tipc_msg *hdr = buf_msg(skb);
1310 struct sk_buff *iskb;
1311 int ipos = 0;
1312 int rc = 0;
1313 u16 seqno;
1314
Tuong Lien2320bcd2019-07-24 08:56:12 +07001315 if (msg_type(hdr) == SYNCH_MSG) {
1316 kfree_skb(skb);
1317 return 0;
1318 }
Tuong Lien58ee86b2019-04-04 11:09:53 +07001319
Tuong Lien2320bcd2019-07-24 08:56:12 +07001320 /* Not a fragment? */
1321 if (likely(!msg_nof_fragms(hdr))) {
1322 if (unlikely(!tipc_msg_extract(skb, &iskb, &ipos))) {
1323 pr_warn_ratelimited("Unable to extract msg, defq: %d\n",
1324 skb_queue_len(fdefq));
1325 return 0;
1326 }
1327 kfree_skb(skb);
1328 } else {
1329 /* Set fragment type for buf_append */
1330 if (msg_fragm_no(hdr) == 1)
1331 msg_set_type(hdr, FIRST_FRAGMENT);
1332 else if (msg_fragm_no(hdr) < msg_nof_fragms(hdr))
1333 msg_set_type(hdr, FRAGMENT);
1334 else
1335 msg_set_type(hdr, LAST_FRAGMENT);
1336
1337 if (!tipc_buf_append(reasm_tnlmsg, &skb)) {
 1338			/* Successful but incomplete reassembly? */
1339 if (*reasm_tnlmsg || link_is_bc_rcvlink(l))
1340 return 0;
1341 pr_warn_ratelimited("Unable to reassemble tunnel msg\n");
1342 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1343 }
1344 iskb = skb;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001345 }
1346
1347 do {
1348 seqno = buf_seqno(iskb);
Tuong Lien58ee86b2019-04-04 11:09:53 +07001349 if (unlikely(less(seqno, l->drop_point))) {
1350 kfree_skb(iskb);
1351 continue;
1352 }
Tuong Lien58ee86b2019-04-04 11:09:53 +07001353 if (unlikely(seqno != l->drop_point)) {
1354 __tipc_skb_queue_sorted(fdefq, seqno, iskb);
1355 continue;
1356 }
1357
1358 l->drop_point++;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001359 if (!tipc_data_input(l, iskb, inputq))
1360 rc |= tipc_link_input(l, iskb, inputq, reasm_skb);
1361 if (unlikely(rc))
1362 break;
1363 } while ((iskb = __tipc_skb_dequeue(fdefq, l->drop_point)));
1364
Tuong Lien58ee86b2019-04-04 11:09:53 +07001365 return rc;
1366}
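
/* Illustrative sketch, not part of the kernel source: what the receive
 * loop above does with each inner packet relative to drop_point. seq_less()
 * is a hypothetical model of the kernel's mod-2^16 less() helper.
 */
#include <stdbool.h>
#include <stdint.h>

static bool seq_less(uint16_t a, uint16_t b)
{
	uint16_t d = (uint16_t)(b - a);

	return d != 0 && d < 0x8000;
}

enum tnl_action { TNL_DROP, TNL_DEFER, TNL_DELIVER };

static enum tnl_action tnl_classify(uint16_t seqno, uint16_t drop_point)
{
	if (seq_less(seqno, drop_point))
		return TNL_DROP;    /* already delivered on the failed link */
	if (seqno != drop_point)
		return TNL_DEFER;   /* gap: park in the failover deferdq */
	return TNL_DELIVER;         /* in order: deliver, then drop_point++ */
}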
1367
Tuong Liend7626b52020-05-26 16:38:34 +07001368/**
1369 * tipc_get_gap_ack_blks - get Gap ACK blocks from PROTOCOL/STATE_MSG
1370 * @ga: returned pointer to the Gap ACK blocks if any
1371 * @l: the tipc link
1372 * @hdr: the PROTOCOL/STATE_MSG header
1373 * @uc: desired Gap ACK blocks type, i.e. unicast (= 1) or broadcast (= 0)
1374 *
1375 * Return: the total Gap ACK blocks size
1376 */
1377u16 tipc_get_gap_ack_blks(struct tipc_gap_ack_blks **ga, struct tipc_link *l,
1378 struct tipc_msg *hdr, bool uc)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001379{
Tuong Liend7626b52020-05-26 16:38:34 +07001380 struct tipc_gap_ack_blks *p;
1381 u16 sz = 0;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001382
Tuong Liend7626b52020-05-26 16:38:34 +07001383 /* Does peer support the Gap ACK blocks feature? */
1384 if (l->peer_caps & TIPC_GAP_ACK_BLOCK) {
1385 p = (struct tipc_gap_ack_blks *)msg_data(hdr);
1386 sz = ntohs(p->len);
1387 /* Sanity check */
1388 if (sz == tipc_gap_ack_blks_sz(p->ugack_cnt + p->bgack_cnt)) {
1389 /* Good, check if the desired type exists */
1390 if ((uc && p->ugack_cnt) || (!uc && p->bgack_cnt))
1391 goto ok;
1392 /* Backward compatible: peer might not support bc, but uc? */
1393 } else if (uc && sz == tipc_gap_ack_blks_sz(p->ugack_cnt)) {
1394 if (p->ugack_cnt) {
1395 p->bgack_cnt = 0;
1396 goto ok;
1397 }
1398 }
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001399 }
Tuong Liend7626b52020-05-26 16:38:34 +07001400 /* Other cases: ignore! */
1401 p = NULL;
1402
1403ok:
1404 *ga = p;
1405 return sz;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001406}
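
/* Illustrative sketch, not part of the kernel source: the sanity check of
 * tipc_get_gap_ack_blks() in isolation. The 4-byte header and 4-byte
 * per-block sizes are assumptions standing in for tipc_gap_ack_blks_sz().
 */
#include <stdbool.h>
#include <stdint.h>

#define GACK_HDR_SZ 4u /* assumed size of the Gap ACK blocks header */
#define GACK_BLK_SZ 4u /* assumed size of one (ack, gap) pair */

static uint16_t gack_blks_sz(uint16_t cnt)
{
	return GACK_HDR_SZ + cnt * GACK_BLK_SZ;
}

/* The advertised length must match the advertised block counts exactly;
 * a backward-compatible peer may advertise unicast blocks only. */
static bool gacks_len_valid(uint16_t len, uint8_t ugack_cnt, uint8_t bgack_cnt)
{
	return len == gack_blks_sz(ugack_cnt + bgack_cnt) ||
	       len == gack_blks_sz(ugack_cnt);
}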
1407
Tuong Liend7626b52020-05-26 16:38:34 +07001408static u8 __tipc_build_gap_ack_blks(struct tipc_gap_ack_blks *ga,
1409 struct tipc_link *l, u8 start_index)
Tuong Lien91959482019-04-04 11:09:51 +07001410{
Tuong Liend7626b52020-05-26 16:38:34 +07001411 struct tipc_gap_ack *gacks = &ga->gacks[start_index];
Tuong Lien91959482019-04-04 11:09:51 +07001412 struct sk_buff *skb = skb_peek(&l->deferdq);
Tuong Liend7626b52020-05-26 16:38:34 +07001413 u16 expect, seqno = 0;
Tuong Lien91959482019-04-04 11:09:51 +07001414 u8 n = 0;
1415
Tuong Liend7626b52020-05-26 16:38:34 +07001416 if (!skb)
1417 return 0;
Tuong Lien91959482019-04-04 11:09:51 +07001418
1419 expect = buf_seqno(skb);
1420 skb_queue_walk(&l->deferdq, skb) {
1421 seqno = buf_seqno(skb);
1422 if (unlikely(more(seqno, expect))) {
Tuong Liend7626b52020-05-26 16:38:34 +07001423 gacks[n].ack = htons(expect - 1);
1424 gacks[n].gap = htons(seqno - expect);
1425 if (++n >= MAX_GAP_ACK_BLKS / 2) {
Tuong Liend7626b52020-05-26 16:38:34 +07001426 pr_info_ratelimited("Gacks on %s: %d, ql: %d!\n",
Tuong Lien03b6fef2020-05-26 16:38:37 +07001427 l->name, n,
Tuong Liend7626b52020-05-26 16:38:34 +07001428 skb_queue_len(&l->deferdq));
1429 return n;
Tuong Lien91959482019-04-04 11:09:51 +07001430 }
1431 } else if (unlikely(less(seqno, expect))) {
1432 pr_warn("Unexpected skb in deferdq!\n");
1433 continue;
1434 }
1435 expect = seqno + 1;
1436 }
1437
1438 /* last block */
Tuong Liend7626b52020-05-26 16:38:34 +07001439 gacks[n].ack = htons(seqno);
1440 gacks[n].gap = 0;
Tuong Lien91959482019-04-04 11:09:51 +07001441 n++;
Tuong Liend7626b52020-05-26 16:38:34 +07001442 return n;
1443}
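
/* Illustrative sketch, not part of the kernel source: the block-building
 * walk above as a pure function over an already-sorted array of deferdq
 * sequence numbers. Mod-2^16 wraparound and the MAX_GAP_ACK_BLKS cap are
 * omitted for clarity.
 */
#include <stdint.h>
#include <stdio.h>

struct gack_model { uint16_t ack, gap; };

/* 'out' must have room for up to 'len' blocks */
static unsigned int build_gacks(const uint16_t *seqno, unsigned int len,
				struct gack_model *out)
{
	unsigned int n = 0, i;
	uint16_t expect;

	if (!len)
		return 0;
	expect = seqno[0];
	for (i = 0; i < len; i++) {
		if (seqno[i] != expect) {    /* discontinuity => one block */
			out[n].ack = expect - 1;
			out[n].gap = seqno[i] - expect;
			n++;
		}
		expect = seqno[i] + 1;
	}
	out[n].ack = expect - 1;             /* closing block, gap = 0 */
	out[n].gap = 0;
	return n + 1;
}

int main(void)
{
	/* deferdq {10,11,14,15,17} => blocks (11,2), (15,1), (17,0) */
	uint16_t dq[] = { 10, 11, 14, 15, 17 };
	struct gack_model g[5];
	unsigned int i, n = build_gacks(dq, 5, g);

	for (i = 0; i < n; i++)
		printf("ack %u gap %u\n", g[i].ack, g[i].gap);
	return 0;
}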
Tuong Lien91959482019-04-04 11:09:51 +07001444
Tuong Liend7626b52020-05-26 16:38:34 +07001445/* tipc_build_gap_ack_blks - build Gap ACK blocks
1446 * @l: tipc unicast link
 1447 * @hdr: the tipc message buffer in which to store the Gap ACK blocks
 1448 *
 1449 * The function builds Gap ACK blocks for both the unicast and broadcast
 1450 * receiver links of a certain peer. Once built, the buffer follows the
 1451 * network data format described by the struct tipc_gap_ack_blks definition.
 1452 *
 1453 * Return: the total size of the built Gap ACK blocks
1454 */
1455static u16 tipc_build_gap_ack_blks(struct tipc_link *l, struct tipc_msg *hdr)
1456{
1457 struct tipc_link *bcl = l->bc_rcvlink;
1458 struct tipc_gap_ack_blks *ga;
1459 u16 len;
1460
1461 ga = (struct tipc_gap_ack_blks *)msg_data(hdr);
1462
1463 /* Start with broadcast link first */
1464 tipc_bcast_lock(bcl->net);
1465 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1466 msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1467 ga->bgack_cnt = __tipc_build_gap_ack_blks(ga, bcl, 0);
1468 tipc_bcast_unlock(bcl->net);
1469
 1470	/* Now the unicast link, but only when an explicit NACK is being sent */
1471 ga->ugack_cnt = (msg_seq_gap(hdr)) ?
1472 __tipc_build_gap_ack_blks(ga, l, ga->bgack_cnt) : 0;
1473
1474 /* Total len */
1475 len = tipc_gap_ack_blks_sz(ga->bgack_cnt + ga->ugack_cnt);
Tuong Lien91959482019-04-04 11:09:51 +07001476 ga->len = htons(len);
Tuong Lien91959482019-04-04 11:09:51 +07001477 return len;
1478}
1479
 1480/* tipc_link_advance_transmq - advance the TIPC link transmq by releasing
 1481 *                             acked packets, also retransmitting if gaps
 1482 *                             are found
 1483 * @l: tipc link with transmq queue to be advanced
Tuong Liend7626b52020-05-26 16:38:34 +07001484 * @r: tipc link "receiver", i.e. the peer's rcv link in case of broadcast (= "l" if unicast)
Tuong Lien91959482019-04-04 11:09:51 +07001485 * @acked: seqno of the last packet acked by peer without any gaps before
 1486 * @gap: # of gap packets
 1487 * @ga: buffer pointer to Gap ACK blocks from peer
 1488 * @xmitq: queue for accumulating the retransmitted packets if any
Tuong Liend7626b52020-05-26 16:38:34 +07001489 * @retransmitted: returned boolean, set if a retransmission is actually issued
 1490 * @rc: returned code, e.g. TIPC_LINK_DOWN_EVT in the unlikely case of
 1491 *      repeated retransmit failure
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001492 *
Tuong Liend7626b52020-05-26 16:38:34 +07001493 * Return: the number of packets released from the link transmq
Tuong Lien91959482019-04-04 11:09:51 +07001494 */
Tuong Liend7626b52020-05-26 16:38:34 +07001495static int tipc_link_advance_transmq(struct tipc_link *l, struct tipc_link *r,
1496 u16 acked, u16 gap,
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001497 struct tipc_gap_ack_blks *ga,
Tuong Liend7626b52020-05-26 16:38:34 +07001498 struct sk_buff_head *xmitq,
1499 bool *retransmitted, int *rc)
Tuong Lien91959482019-04-04 11:09:51 +07001500{
Tuong Liend7626b52020-05-26 16:38:34 +07001501 struct tipc_gap_ack_blks *last_ga = r->last_ga, *this_ga = NULL;
1502 struct tipc_gap_ack *gacks = NULL;
Tuong Lien91959482019-04-04 11:09:51 +07001503 struct sk_buff *skb, *_skb, *tmp;
1504 struct tipc_msg *hdr;
Tuong Liend7626b52020-05-26 16:38:34 +07001505 u32 qlen = skb_queue_len(&l->transmq);
1506 u16 nacked = acked, ngap = gap, gack_cnt = 0;
Tuong Lien91959482019-04-04 11:09:51 +07001507 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
1508 u16 ack = l->rcv_nxt - 1;
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001509 u16 seqno, n = 0;
Tuong Liend7626b52020-05-26 16:38:34 +07001510 u16 end = r->acked, start = end, offset = r->last_gap;
1511 u16 si = (last_ga) ? last_ga->start_index : 0;
1512 bool is_uc = !link_is_bc_sndlink(l);
1513 bool bc_has_acked = false;
Tuong Lien6a6b5c82019-06-17 12:15:42 +07001514
Tuong Lienc6ed7a52020-05-26 16:38:35 +07001515 trace_tipc_link_retrans(r, acked + 1, acked + gap, &l->transmq);
1516
Tuong Liend7626b52020-05-26 16:38:34 +07001517 /* Determine Gap ACK blocks if any for the particular link */
1518 if (ga && is_uc) {
1519 /* Get the Gap ACKs, uc part */
1520 gack_cnt = ga->ugack_cnt;
1521 gacks = &ga->gacks[ga->bgack_cnt];
1522 } else if (ga) {
1523 /* Copy the Gap ACKs, bc part, for later renewal if needed */
1524 this_ga = kmemdup(ga, tipc_gap_ack_blks_sz(ga->bgack_cnt),
1525 GFP_ATOMIC);
1526 if (likely(this_ga)) {
1527 this_ga->start_index = 0;
1528 /* Start with the bc Gap ACKs */
1529 gack_cnt = this_ga->bgack_cnt;
1530 gacks = &this_ga->gacks[0];
1531 } else {
 1532			/* Allocation failed: simply ignore these bc Gap ACKs */
1533 pr_warn_ratelimited("Ignoring bc Gap ACKs, no memory\n");
1534 }
1535 }
1536
1537 /* Advance the link transmq */
Tuong Lien91959482019-04-04 11:09:51 +07001538 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1539 seqno = buf_seqno(skb);
1540
1541next_gap_ack:
Tuong Liend7626b52020-05-26 16:38:34 +07001542 if (less_eq(seqno, nacked)) {
1543 if (is_uc)
1544 goto release;
1545 /* Skip packets peer has already acked */
1546 if (!more(seqno, r->acked))
1547 continue;
1548 /* Get the next of last Gap ACK blocks */
1549 while (more(seqno, end)) {
1550 if (!last_ga || si >= last_ga->bgack_cnt)
1551 break;
1552 start = end + offset + 1;
1553 end = ntohs(last_ga->gacks[si].ack);
1554 offset = ntohs(last_ga->gacks[si].gap);
1555 si++;
1556 WARN_ONCE(more(start, end) ||
1557 (!offset &&
1558 si < last_ga->bgack_cnt) ||
1559 si > MAX_GAP_ACK_BLKS,
1560 "Corrupted Gap ACK: %d %d %d %d %d\n",
1561 start, end, offset, si,
1562 last_ga->bgack_cnt);
1563 }
1564 /* Check against the last Gap ACK block */
1565 if (in_range(seqno, start, end))
1566 continue;
1567 /* Update/release the packet peer is acking */
1568 bc_has_acked = true;
1569 if (--TIPC_SKB_CB(skb)->ackers)
1570 continue;
1571release:
Tuong Lien91959482019-04-04 11:09:51 +07001572 /* release skb */
1573 __skb_unlink(skb, &l->transmq);
1574 kfree_skb(skb);
Tuong Liend7626b52020-05-26 16:38:34 +07001575 } else if (less_eq(seqno, nacked + ngap)) {
 1576			/* First gap: check for repeated retransmit failure */
1577 if (unlikely(seqno == acked + 1 &&
1578 link_retransmit_failure(l, r, rc))) {
 1579				/* Ignore these bc Gap ACKs if any */
1580 kfree(this_ga);
1581 this_ga = NULL;
1582 break;
1583 }
Tuong Lien71204232019-08-15 10:24:08 +07001584			/* Retransmit the skb unless it is still rate-restricted */
Tuong Lien382f5982019-04-04 11:09:52 +07001585 if (time_before(jiffies, TIPC_SKB_CB(skb)->nxt_retr))
1586 continue;
Tuong Liend7626b52020-05-26 16:38:34 +07001587 TIPC_SKB_CB(skb)->nxt_retr = (is_uc) ?
1588 TIPC_UC_RETR_TIME : TIPC_BC_RETR_LIM;
Tuong Lienfc1b6d62019-11-08 12:05:11 +07001589 _skb = pskb_copy(skb, GFP_ATOMIC);
Tuong Lien91959482019-04-04 11:09:51 +07001590 if (!_skb)
1591 continue;
1592 hdr = buf_msg(_skb);
1593 msg_set_ack(hdr, ack);
1594 msg_set_bcast_ack(hdr, bc_ack);
1595 _skb->priority = TC_PRIO_CONTROL;
1596 __skb_queue_tail(xmitq, _skb);
1597 l->stats.retransmitted++;
Tuong Lien03b6fef2020-05-26 16:38:37 +07001598 if (!is_uc)
1599 r->stats.retransmitted++;
Tuong Liend7626b52020-05-26 16:38:34 +07001600 *retransmitted = true;
Tuong Lien71204232019-08-15 10:24:08 +07001601 /* Increase actual retrans counter & mark first time */
1602 if (!TIPC_SKB_CB(skb)->retr_cnt++)
1603 TIPC_SKB_CB(skb)->retr_stamp = jiffies;
Tuong Lien91959482019-04-04 11:09:51 +07001604 } else {
1605 /* retry with Gap ACK blocks if any */
Tuong Liend7626b52020-05-26 16:38:34 +07001606 if (n >= gack_cnt)
Tuong Lien91959482019-04-04 11:09:51 +07001607 break;
Tuong Liend7626b52020-05-26 16:38:34 +07001608 nacked = ntohs(gacks[n].ack);
1609 ngap = ntohs(gacks[n].gap);
Tuong Lien91959482019-04-04 11:09:51 +07001610 n++;
1611 goto next_gap_ack;
1612 }
1613 }
Tuong Liend7626b52020-05-26 16:38:34 +07001614
1615 /* Renew last Gap ACK blocks for bc if needed */
1616 if (bc_has_acked) {
1617 if (this_ga) {
1618 kfree(last_ga);
1619 r->last_ga = this_ga;
1620 r->last_gap = gap;
1621 } else if (last_ga) {
1622 if (less(acked, start)) {
1623 si--;
1624 offset = start - acked - 1;
1625 } else if (less(acked, end)) {
1626 acked = end;
1627 }
1628 if (si < last_ga->bgack_cnt) {
1629 last_ga->start_index = si;
1630 r->last_gap = offset;
1631 } else {
1632 kfree(last_ga);
1633 r->last_ga = NULL;
1634 r->last_gap = 0;
1635 }
1636 } else {
1637 r->last_gap = 0;
1638 }
1639 r->acked = acked;
1640 } else {
1641 kfree(this_ga);
1642 }
1643
1644 return qlen - skb_queue_len(&l->transmq);
Tuong Lien91959482019-04-04 11:09:51 +07001645}
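
/* Illustrative sketch, not part of the kernel source: the per-packet
 * decision taken by the unicast branch of the transmq walk above, as a
 * pure function. The kernel walks the sorted queue once and keeps the
 * Gap ACK cursor across packets; re-scanning per packet as done here
 * gives the same result on a sorted queue. Wraparound is omitted.
 */
#include <stdint.h>

struct gack_walk_model { uint16_t ack, gap; };

enum pkt_fate { PKT_RELEASE, PKT_RETRANSMIT, PKT_KEEP };

static enum pkt_fate pkt_fate(uint16_t seqno, uint16_t acked, uint16_t gap,
			      const struct gack_walk_model *ga,
			      unsigned int ga_cnt)
{
	unsigned int n = 0;

	for (;;) {
		if (seqno <= acked)
			return PKT_RELEASE;     /* peer has it: drop our copy */
		if (seqno <= (uint16_t)(acked + gap))
			return PKT_RETRANSMIT;  /* inside a reported gap */
		if (n >= ga_cnt)
			return PKT_KEEP;        /* beyond all blocks: wait */
		acked = ga[n].ack;              /* advance to the next block */
		gap = ga[n].gap;
		n++;
	}
}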
1646
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001647/* tipc_link_build_state_msg: prepare link state message for transmission
Jon Paul Maloy52666982015-10-22 08:51:41 -04001648 *
1649 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1650 * risk of ack storms towards the sender
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001651 */
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001652int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001653{
Jon Paul Maloy52666982015-10-22 08:51:41 -04001654 if (!l)
1655 return 0;
1656
1657 /* Broadcast ACK must be sent via a unicast link => defer to caller */
1658 if (link_is_bc_rcvlink(l)) {
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001659 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
Jon Paul Maloy52666982015-10-22 08:51:41 -04001660 return 0;
1661 l->rcv_unacked = 0;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001662
1663 /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1664 l->snd_nxt = l->rcv_nxt;
1665 return TIPC_LINK_SND_STATE;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001666 }
Jon Paul Maloy52666982015-10-22 08:51:41 -04001667 /* Unicast ACK */
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001668 l->rcv_unacked = 0;
1669 l->stats.sent_acks++;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001670 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001671 return 0;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001672}
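
/* Illustrative sketch, not part of the kernel source: the ack staggering
 * used above. On the broadcast receive link a node acks only when the low
 * four bits of (rcv_nxt XOR own address) are all ones, so each node gets
 * roughly one ack opportunity in sixteen, at node-specific sequence
 * numbers, which spreads the acks and damps storms towards the sender.
 */
#include <stdbool.h>
#include <stdint.h>

static bool may_send_bc_ack(uint16_t rcv_nxt, uint32_t own_addr)
{
	return ((rcv_nxt ^ own_addr) & 0xf) == 0xf;
}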
1673
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001674/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1675 */
1676void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1677{
1678 int mtyp = RESET_MSG;
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001679 struct sk_buff *skb;
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001680
1681 if (l->state == LINK_ESTABLISHING)
1682 mtyp = ACTIVATE_MSG;
1683
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001684 tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);
Jon Paul Maloy634696b2016-04-15 13:33:03 -04001685
1686 /* Inform peer that this endpoint is going down if applicable */
1687 skb = skb_peek_tail(xmitq);
1688 if (skb && (l->state == LINK_RESET))
1689 msg_set_peer_stopping(buf_msg(skb), 1);
Jon Paul Maloy282b3a02015-10-15 14:52:45 -04001690}
1691
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001692/* tipc_link_build_nack_msg: prepare link nack message for transmission
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001693 * Note that sending of broadcast NACK is coordinated among nodes, to
1694 * reduce the risk of NACK storms towards the sender
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001695 */
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001696static int tipc_link_build_nack_msg(struct tipc_link *l,
1697 struct sk_buff_head *xmitq)
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001698{
1699 u32 def_cnt = ++l->stats.deferred_recv;
Jon Maloy02288242019-12-10 00:52:44 +01001700 struct sk_buff_head *dfq = &l->deferdq;
1701 u32 defq_len = skb_queue_len(dfq);
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001702 int match1, match2;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001703
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001704 if (link_is_bc_rcvlink(l)) {
1705 match1 = def_cnt & 0xf;
1706 match2 = tipc_own_addr(l->net) & 0xf;
1707 if (match1 == match2)
1708 return TIPC_LINK_SND_STATE;
1709 return 0;
1710 }
Jon Paul Maloy52666982015-10-22 08:51:41 -04001711
Jon Maloy02288242019-12-10 00:52:44 +01001712 if (defq_len >= 3 && !((defq_len - 3) % 16)) {
1713 u16 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1714
1715 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0,
1716 rcvgap, 0, 0, xmitq);
1717 }
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001718 return 0;
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001719}
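
/* Illustrative sketch, not part of the kernel source: the two NACK
 * damping rules applied above, isolated as pure functions.
 */
#include <stdbool.h>
#include <stdint.h>

/* Broadcast: only the node whose low address bits happen to match the
 * deferred-packet counter sends a NACK for this event */
static bool bc_nack_due(uint32_t def_cnt, uint32_t own_addr)
{
	return (def_cnt & 0xf) == (own_addr & 0xf);
}

/* Unicast: NACK when the deferdq first reaches 3 packets, then again
 * every 16 deferred packets (3, 19, 35, ...) */
static bool uc_nack_due(uint32_t defq_len)
{
	return defq_len >= 3 && !((defq_len - 3) % 16);
}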
1720
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001721/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001722 * @l: the link that should handle the message
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001723 * @skb: TIPC packet
1724 * @xmitq: queue to place packets to be sent after this call
1725 */
1726int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1727 struct sk_buff_head *xmitq)
1728{
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001729 struct sk_buff_head *defq = &l->deferdq;
Tuong Lien382f5982019-04-04 11:09:52 +07001730 struct tipc_msg *hdr = buf_msg(skb);
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001731 u16 seqno, rcv_nxt, win_lim;
Jon Maloy16ad3f42019-12-10 00:52:46 +01001732 int released = 0;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001733 int rc = 0;
1734
Tuong Lien382f5982019-04-04 11:09:52 +07001735 /* Verify and update link state */
1736 if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1737 return tipc_link_proto_rcv(l, skb, xmitq);
1738
1739 /* Don't send probe at next timeout expiration */
1740 l->silent_intv_cnt = 0;
1741
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001742 do {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001743 hdr = buf_msg(skb);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001744 seqno = msg_seqno(hdr);
1745 rcv_nxt = l->rcv_nxt;
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001746 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001747
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001748 if (unlikely(!link_is_up(l))) {
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04001749 if (l->state == LINK_ESTABLISHING)
1750 rc = TIPC_LINK_UP_EVT;
Jon Maloy16ad3f42019-12-10 00:52:46 +01001751 kfree_skb(skb);
1752 break;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001753 }
1754
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001755 /* Drop if outside receive window */
1756 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1757 l->stats.duplicates++;
Jon Maloy16ad3f42019-12-10 00:52:46 +01001758 kfree_skb(skb);
1759 break;
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001760 }
Tuong Liend7626b52020-05-26 16:38:34 +07001761 released += tipc_link_advance_transmq(l, l, msg_ack(hdr), 0,
1762 NULL, NULL, NULL, NULL);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001763
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001764 /* Defer delivery if sequence gap */
1765 if (unlikely(seqno != rcv_nxt)) {
Tuong Lien03b6fef2020-05-26 16:38:37 +07001766 if (!__tipc_skb_queue_sorted(defq, seqno, skb))
1767 l->stats.duplicates++;
Jon Paul Maloye0a05eb2016-09-01 13:52:51 -04001768 rc |= tipc_link_build_nack_msg(l, xmitq);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001769 break;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001770 }
1771
Jon Paul Maloy81204c42015-10-15 14:52:42 -04001772 /* Deliver packet */
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001773 l->rcv_nxt++;
Jon Paul Maloy95901122016-11-25 10:35:02 -05001774 l->stats.recv_pkts++;
Tuong Lien58ee86b2019-04-04 11:09:53 +07001775
1776 if (unlikely(msg_user(hdr) == TUNNEL_PROTOCOL))
1777 rc |= tipc_link_tnl_rcv(l, skb, l->inputq);
1778 else if (!tipc_data_input(l, skb, l->inputq))
1779 rc |= tipc_link_input(l, skb, l->inputq, &l->reasm_buf);
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001780 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001781 rc |= tipc_link_build_state_msg(l, xmitq);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001782 if (unlikely(rc & ~TIPC_LINK_SND_STATE))
Jon Paul Maloy52666982015-10-22 08:51:41 -04001783 break;
Tuong Lien382f5982019-04-04 11:09:52 +07001784 } while ((skb = __tipc_skb_dequeue(defq, l->rcv_nxt)));
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001785
Jon Maloy16ad3f42019-12-10 00:52:46 +01001786 /* Forward queues and wake up waiting users */
1787 if (released) {
1788 tipc_link_update_cwin(l, released, 0);
1789 tipc_link_advance_backlog(l, xmitq);
1790 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1791 link_prepare_wakeup(l);
1792 }
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001793 return rc;
1794}
1795
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001796static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001797 bool probe_reply, u16 rcvgap,
1798 int tolerance, int priority,
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001799 struct sk_buff_head *xmitq)
1800{
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001801 struct tipc_mon_state *mstate = &l->mon_state;
Tuong Lien03b6fef2020-05-26 16:38:37 +07001802 struct sk_buff_head *dfq = &l->deferdq;
1803 struct tipc_link *bcl = l->bc_rcvlink;
1804 struct tipc_msg *hdr;
1805 struct sk_buff *skb;
1806 bool node_up = link_is_up(bcl);
1807 u16 glen = 0, bc_rcvgap = 0;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001808 int dlen = 0;
1809 void *data;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001810
1811 /* Don't send protocol message during reset or link failover */
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001812 if (tipc_link_is_blocked(l))
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001813 return;
1814
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001815 if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1816 return;
1817
Jon Maloy02288242019-12-10 00:52:44 +01001818 if ((probe || probe_reply) && !skb_queue_empty(dfq))
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001819 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1820
1821 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
Tuong Lien91959482019-04-04 11:09:51 +07001822 tipc_max_domain_size + MAX_GAP_ACK_BLKS_SZ,
1823 l->addr, tipc_own_addr(l->net), 0, 0, 0);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001824 if (!skb)
1825 return;
1826
1827 hdr = buf_msg(skb);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001828 data = msg_data(hdr);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001829 msg_set_session(hdr, l->session);
1830 msg_set_bearer_id(hdr, l->bearer_id);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001831 msg_set_net_plane(hdr, l->net_plane);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001832 msg_set_next_sent(hdr, l->snd_nxt);
1833 msg_set_ack(hdr, l->rcv_nxt - 1);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001834 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
Jon Paul Maloy06bd2b12016-10-27 18:51:55 -04001835 msg_set_bc_ack_invalid(hdr, !node_up);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001836 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001837 msg_set_link_tolerance(hdr, tolerance);
1838 msg_set_linkprio(hdr, priority);
1839 msg_set_redundant_link(hdr, node_up);
1840 msg_set_seq_gap(hdr, 0);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001841 msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001842
1843 if (mtyp == STATE_MSG) {
Jon Maloy9012de52018-07-10 01:07:35 +02001844 if (l->peer_caps & TIPC_LINK_PROTO_SEQNO)
1845 msg_set_seqno(hdr, l->snd_nxt_state++);
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001846 msg_set_seq_gap(hdr, rcvgap);
Tuong Lien03b6fef2020-05-26 16:38:37 +07001847 bc_rcvgap = link_bc_rcv_gap(bcl);
1848 msg_set_bc_gap(hdr, bc_rcvgap);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001849 msg_set_probe(hdr, probe);
Jon Maloy8d6e79d2017-11-08 09:59:26 +01001850 msg_set_is_keepalive(hdr, probe || probe_reply);
Tuong Lien91959482019-04-04 11:09:51 +07001851 if (l->peer_caps & TIPC_GAP_ACK_BLOCK)
Tuong Liend7626b52020-05-26 16:38:34 +07001852 glen = tipc_build_gap_ack_blks(l, hdr);
Tuong Lien91959482019-04-04 11:09:51 +07001853 tipc_mon_prep(l->net, data + glen, &dlen, mstate, l->bearer_id);
1854 msg_set_size(hdr, INT_H_SIZE + glen + dlen);
1855 skb_trim(skb, INT_H_SIZE + glen + dlen);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001856 l->stats.sent_states++;
Jon Paul Maloy52666982015-10-22 08:51:41 -04001857 l->rcv_unacked = 0;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001858 } else {
1859 /* RESET_MSG or ACTIVATE_MSG */
Tuong Lien91986ee2019-02-11 13:29:43 +07001860 if (mtyp == ACTIVATE_MSG) {
1861 msg_set_dest_session_valid(hdr, 1);
1862 msg_set_dest_session(hdr, l->peer_session);
1863 }
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001864 msg_set_max_pkt(hdr, l->advertised_mtu);
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04001865 strcpy(data, l->if_name);
1866 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1867 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001868 }
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001869 if (probe)
1870 l->stats.sent_probes++;
1871 if (rcvgap)
1872 l->stats.sent_nacks++;
Tuong Lien03b6fef2020-05-26 16:38:37 +07001873 if (bc_rcvgap)
1874 bcl->stats.sent_nacks++;
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001875 skb->priority = TC_PRIO_CONTROL;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001876 __skb_queue_tail(xmitq, skb);
Tuong Lien26574db2018-12-19 09:17:57 +07001877 trace_tipc_proto_build(skb, false, l->name);
Jon Paul Maloy426cc2b2015-07-16 16:54:26 -04001878}
Per Lidenb97bf3f2006-01-02 19:04:38 +01001879
LUU Duc Canhc140eb12018-09-26 21:00:54 +02001880void tipc_link_create_dummy_tnl_msg(struct tipc_link *l,
1881 struct sk_buff_head *xmitq)
1882{
1883 u32 onode = tipc_own_addr(l->net);
1884 struct tipc_msg *hdr, *ihdr;
1885 struct sk_buff_head tnlq;
1886 struct sk_buff *skb;
1887 u32 dnode = l->addr;
1888
Jon Maloye654f9f2019-08-15 16:42:50 +02001889 __skb_queue_head_init(&tnlq);
LUU Duc Canhc140eb12018-09-26 21:00:54 +02001890 skb = tipc_msg_create(TUNNEL_PROTOCOL, FAILOVER_MSG,
1891 INT_H_SIZE, BASIC_H_SIZE,
1892 dnode, onode, 0, 0, 0);
1893 if (!skb) {
1894 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1895 return;
1896 }
1897
1898 hdr = buf_msg(skb);
1899 msg_set_msgcnt(hdr, 1);
1900 msg_set_bearer_id(hdr, l->peer_bearer_id);
1901
1902 ihdr = (struct tipc_msg *)msg_data(hdr);
1903 tipc_msg_init(onode, ihdr, TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1904 BASIC_H_SIZE, dnode);
1905 msg_set_errcode(ihdr, TIPC_ERR_NO_PORT);
1906 __skb_queue_tail(&tnlq, skb);
1907 tipc_link_xmit(l, &tnlq, xmitq);
1908}
1909
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001910/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
Jon Paul Maloyf9aa3582015-10-15 14:52:41 -04001911 * with contents of the link's transmit and backlog queues.
Per Lidenb97bf3f2006-01-02 19:04:38 +01001912 */
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001913void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1914 int mtyp, struct sk_buff_head *xmitq)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001915{
Tuong Lien58ee86b2019-04-04 11:09:53 +07001916 struct sk_buff_head *fdefq = &tnl->failover_deferdq;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001917 struct sk_buff *skb, *tnlskb;
1918 struct tipc_msg *hdr, tnlhdr;
1919 struct sk_buff_head *queue = &l->transmq;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001920 struct sk_buff_head tmpxq, tnlq, frags;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001921 u16 pktlen, pktcnt, seqno = l->snd_nxt;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001922 bool pktcnt_need_update = false;
Tuong Lien4929a932019-07-24 08:56:11 +07001923 u16 syncpt;
Tuong Lien2320bcd2019-07-24 08:56:12 +07001924 int rc;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001925
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001926 if (!tnl)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001927 return;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001928
Jon Maloye654f9f2019-08-15 16:42:50 +02001929 __skb_queue_head_init(&tnlq);
Tuong Lien4929a932019-07-24 08:56:11 +07001930 /* Link Synching:
1931 * From now on, send only one single ("dummy") SYNCH message
1932 * to peer. The SYNCH message does not contain any data, just
1933 * a header conveying the synch point to the peer.
1934 */
1935 if (mtyp == SYNCH_MSG && (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1936 tnlskb = tipc_msg_create(TUNNEL_PROTOCOL, SYNCH_MSG,
1937 INT_H_SIZE, 0, l->addr,
1938 tipc_own_addr(l->net),
1939 0, 0, 0);
1940 if (!tnlskb) {
1941 pr_warn("%sunable to create dummy SYNCH_MSG\n",
1942 link_co_err);
1943 return;
1944 }
1945
1946 hdr = buf_msg(tnlskb);
1947 syncpt = l->snd_nxt + skb_queue_len(&l->backlogq) - 1;
1948 msg_set_syncpt(hdr, syncpt);
1949 msg_set_bearer_id(hdr, l->peer_bearer_id);
1950 __skb_queue_tail(&tnlq, tnlskb);
1951 tipc_link_xmit(tnl, &tnlq, xmitq);
1952 return;
1953 }
1954
Tuong Liend0d605c2019-11-06 18:12:17 +07001955 __skb_queue_head_init(&tmpxq);
1956 __skb_queue_head_init(&frags);
1957 /* At least one packet required for safe algorithm => add dummy */
1958 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1959 BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1960 0, 0, TIPC_ERR_NO_PORT);
1961 if (!skb) {
1962 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1963 return;
1964 }
1965 __skb_queue_tail(&tnlq, skb);
1966 tipc_link_xmit(l, &tnlq, &tmpxq);
1967 __skb_queue_purge(&tmpxq);
1968
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001969 /* Initialize reusable tunnel packet header */
Jon Paul Maloye74a3862016-03-03 14:23:21 -05001970 tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001971 mtyp, INT_H_SIZE, l->addr);
Tuong Lien58ee86b2019-04-04 11:09:53 +07001972 if (mtyp == SYNCH_MSG)
1973 pktcnt = l->snd_nxt - buf_seqno(skb_peek(&l->transmq));
1974 else
1975 pktcnt = skb_queue_len(&l->transmq);
1976 pktcnt += skb_queue_len(&l->backlogq);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001977 msg_set_msgcnt(&tnlhdr, pktcnt);
1978 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1979tnl:
1980 /* Wrap each packet into a tunnel packet */
Jon Paul Maloy05dcc5a2015-03-13 16:08:10 -04001981 skb_queue_walk(queue, skb) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001982 hdr = buf_msg(skb);
1983 if (queue == &l->backlogq)
1984 msg_set_seqno(hdr, seqno++);
1985 pktlen = msg_size(hdr);
Tuong Lien2320bcd2019-07-24 08:56:12 +07001986
1987 /* Tunnel link MTU is not large enough? This could be
1988 * due to:
 1989		 * 1) the link MTU has just changed or been set differently;
 1990		 * 2) FAILOVER on top of a SYNCH message
1991 *
1992 * The 2nd case should not happen if peer supports
1993 * TIPC_TUNNEL_ENHANCED
1994 */
1995 if (pktlen > tnl->mtu - INT_H_SIZE) {
1996 if (mtyp == FAILOVER_MSG &&
1997 (tnl->peer_caps & TIPC_TUNNEL_ENHANCED)) {
1998 rc = tipc_msg_fragment(skb, &tnlhdr, tnl->mtu,
1999 &frags);
2000 if (rc) {
2001 pr_warn("%sunable to frag msg: rc %d\n",
2002 link_co_err, rc);
2003 return;
2004 }
2005 pktcnt += skb_queue_len(&frags) - 1;
2006 pktcnt_need_update = true;
2007 skb_queue_splice_tail_init(&frags, &tnlq);
2008 continue;
2009 }
 2010			/* Unluckily, the peer doesn't support TIPC_TUNNEL_ENHANCED
 2011			 * => just warn and return!
2012 */
2013 pr_warn_ratelimited("%stoo large msg <%d, %d>: %d!\n",
2014 link_co_err, msg_user(hdr),
2015 msg_type(hdr), msg_size(hdr));
2016 return;
2017 }
2018
Jon Paul Maloy6e498152015-07-30 18:24:19 -04002019 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
Parthasarathy Bhuvaragan57d5f642017-01-13 15:46:25 +01002020 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04002021 if (!tnlskb) {
2022 pr_warn("%sunable to send packet\n", link_co_err);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002023 return;
2024 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04002025 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
2026 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
2027 __skb_queue_tail(&tnlq, tnlskb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002028 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04002029 if (queue != &l->backlogq) {
2030 queue = &l->backlogq;
2031 goto tnl;
Jon Paul Maloydd3f9e72015-05-14 10:46:18 -04002032 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01002033
Tuong Lien2320bcd2019-07-24 08:56:12 +07002034 if (pktcnt_need_update)
2035 skb_queue_walk(&tnlq, skb) {
2036 hdr = buf_msg(skb);
2037 msg_set_msgcnt(hdr, pktcnt);
2038 }
2039
Jon Paul Maloy6e498152015-07-30 18:24:19 -04002040 tipc_link_xmit(tnl, &tnlq, xmitq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05002041
Jon Paul Maloy6e498152015-07-30 18:24:19 -04002042 if (mtyp == FAILOVER_MSG) {
2043 tnl->drop_point = l->rcv_nxt;
2044 tnl->failover_reasm_skb = l->reasm_buf;
2045 l->reasm_buf = NULL;
Tuong Lien58ee86b2019-04-04 11:09:53 +07002046
2047 /* Failover the link's deferdq */
2048 if (unlikely(!skb_queue_empty(fdefq))) {
2049 pr_warn("Link failover deferdq not empty: %d!\n",
2050 skb_queue_len(fdefq));
2051 __skb_queue_purge(fdefq);
2052 }
2053 skb_queue_splice_init(&l->deferdq, fdefq);
Jon Paul Maloyf006c9c2014-02-13 17:29:11 -05002054 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01002055}
2056
Tuong Lienc0b14a082019-05-02 17:23:23 +07002057/**
2058 * tipc_link_failover_prepare() - prepare tnl for link failover
2059 *
 2060 * This is a special version of its precursor, tipc_link_tnl_prepare();
 2061 * see tipc_node_link_failover() for details.
2062 *
2063 * @l: failover link
2064 * @tnl: tunnel link
2065 * @xmitq: queue for messages to be xmited
2066 */
2067void tipc_link_failover_prepare(struct tipc_link *l, struct tipc_link *tnl,
2068 struct sk_buff_head *xmitq)
2069{
2070 struct sk_buff_head *fdefq = &tnl->failover_deferdq;
2071
2072 tipc_link_create_dummy_tnl_msg(tnl, xmitq);
2073
Geert Uytterhoeven8ebed8a2019-10-24 17:30:43 +02002074 /* This failover link endpoint was never established before,
Tuong Lienc0b14a082019-05-02 17:23:23 +07002075 * so it has not received anything from peer.
2076 * Otherwise, it must be a normal failover situation or the
2077 * node has entered SELF_DOWN_PEER_LEAVING and both peer nodes
2078 * would have to start over from scratch instead.
2079 */
Tuong Lienc0b14a082019-05-02 17:23:23 +07002080 tnl->drop_point = 1;
2081 tnl->failover_reasm_skb = NULL;
2082
2083 /* Initiate the link's failover deferdq */
2084 if (unlikely(!skb_queue_empty(fdefq))) {
2085 pr_warn("Link failover deferdq not empty: %d!\n",
2086 skb_queue_len(fdefq));
2087 __skb_queue_purge(fdefq);
2088 }
2089}
2090
Jon Maloy7ea817f2018-07-10 01:07:36 +02002091/* tipc_link_validate_msg(): validate message against current link state
2092 * Returns true if message should be accepted, otherwise false
2093 */
2094bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr)
2095{
2096 u16 curr_session = l->peer_session;
2097 u16 session = msg_session(hdr);
2098 int mtyp = msg_type(hdr);
2099
2100 if (msg_user(hdr) != LINK_PROTOCOL)
2101 return true;
2102
2103 switch (mtyp) {
2104 case RESET_MSG:
2105 if (!l->in_session)
2106 return true;
2107 /* Accept only RESET with new session number */
2108 return more(session, curr_session);
2109 case ACTIVATE_MSG:
2110 if (!l->in_session)
2111 return true;
2112 /* Accept only ACTIVATE with new or current session number */
2113 return !less(session, curr_session);
2114 case STATE_MSG:
2115 /* Accept only STATE with current session number */
2116 if (!l->in_session)
2117 return false;
2118 if (session != curr_session)
2119 return false;
LUU Duc Canhd949cfe2018-09-26 22:28:52 +02002120 /* Extra sanity check */
2121 if (!link_is_up(l) && msg_ack(hdr))
2122 return false;
Jon Maloy7ea817f2018-07-10 01:07:36 +02002123 if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO))
2124 return true;
2125 /* Accept only STATE with new sequence number */
2126 return !less(msg_seqno(hdr), l->rcv_nxt_state);
2127 default:
2128 return false;
2129 }
2130}
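
/* Illustrative sketch, not part of the kernel source: the session
 * acceptance rules above as a pure function. seq_less() is a hypothetical
 * model of the kernel's mod-2^16 less(); the extra STATE checks (link up,
 * protocol seqno) are omitted.
 */
#include <stdbool.h>
#include <stdint.h>

static bool seq_less(uint16_t a, uint16_t b)
{
	uint16_t d = (uint16_t)(b - a);

	return d != 0 && d < 0x8000;
}

enum pmtyp_model { P_RESET, P_ACTIVATE, P_STATE };

static bool session_acceptable(enum pmtyp_model mtyp, bool in_session,
			       uint16_t session, uint16_t curr)
{
	switch (mtyp) {
	case P_RESET:     /* only a strictly newer session may reset us */
		return !in_session || seq_less(curr, session);
	case P_ACTIVATE:  /* a new or the current session is acceptable */
		return !in_session || !seq_less(session, curr);
	case P_STATE:     /* strictly the current session */
		return in_session && session == curr;
	}
	return false;
}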
2131
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002132/* tipc_link_proto_rcv(): receive link level protocol message.
2133 * Note that network plane id propagates through the network, and may
2134 * change at any time. The node with lowest numerical id determines
2135 * network plane
2136 */
2137static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
2138 struct sk_buff_head *xmitq)
2139{
2140 struct tipc_msg *hdr = buf_msg(skb);
Tuong Lien91959482019-04-04 11:09:51 +07002141 struct tipc_gap_ack_blks *ga = NULL;
Tuong Liend7626b52020-05-26 16:38:34 +07002142 bool reply = msg_probe(hdr), retransmitted = false;
2143 u16 dlen = msg_data_sz(hdr), glen = 0;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002144 u16 peers_snd_nxt = msg_next_sent(hdr);
2145 u16 peers_tol = msg_link_tolerance(hdr);
2146 u16 peers_prio = msg_linkprio(hdr);
Tuong Liend7626b52020-05-26 16:38:34 +07002147 u16 gap = msg_seq_gap(hdr);
2148 u16 ack = msg_ack(hdr);
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04002149 u16 rcv_nxt = l->rcv_nxt;
Tuong Liend7626b52020-05-26 16:38:34 +07002150 u16 rcvgap = 0;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002151 int mtyp = msg_type(hdr);
Tuong Liend7626b52020-05-26 16:38:34 +07002152 int rc = 0, released;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002153 char *if_name;
Tuong Liend7626b52020-05-26 16:38:34 +07002154 void *data;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002155
Tuong Lien26574db2018-12-19 09:17:57 +07002156 trace_tipc_proto_rcv(skb, false, l->name);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002157 if (tipc_link_is_blocked(l) || !xmitq)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002158 goto exit;
2159
Jon Paul Maloye74a3862016-03-03 14:23:21 -05002160 if (tipc_own_addr(l->net) > msg_prevnode(hdr))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002161 l->net_plane = msg_net_plane(hdr);
2162
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04002163 skb_linearize(skb);
2164 hdr = buf_msg(skb);
2165 data = msg_data(hdr);
2166
Tuong Lien26574db2018-12-19 09:17:57 +07002167 if (!tipc_link_validate_msg(l, hdr)) {
2168 trace_tipc_skb_dump(skb, false, "PROTO invalid (1)!");
2169 trace_tipc_link_dump(l, TIPC_DUMP_NONE, "PROTO invalid (1)!");
Jon Maloy7ea817f2018-07-10 01:07:36 +02002170 goto exit;
Tuong Lien26574db2018-12-19 09:17:57 +07002171 }
Jon Maloy7ea817f2018-07-10 01:07:36 +02002172
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002173 switch (mtyp) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002174 case RESET_MSG:
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002175 case ACTIVATE_MSG:
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002176 /* Complete own link name with peer's interface name */
2177 if_name = strrchr(l->name, ':') + 1;
2178 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
2179 break;
2180 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
2181 break;
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04002182 strncpy(if_name, data, TIPC_MAX_IF_NAME);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002183
2184 /* Update own tolerance if peer indicates a non-zero value */
Jon Maloy047491e2018-10-10 17:34:01 +02002185 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002186 l->tolerance = peers_tol;
Jon Maloy047491e2018-10-10 17:34:01 +02002187 l->bc_rcvlink->tolerance = peers_tol;
2188 }
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002189 /* Update own priority if peer's priority is higher */
2190 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
2191 l->priority = peers_prio;
2192
Jon Maloy7ab412d2018-11-10 17:30:24 -05002193 /* If peer is going down we want full re-establish cycle */
2194 if (msg_peer_stopping(hdr)) {
Jon Paul Maloy634696b2016-04-15 13:33:03 -04002195 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
Jon Maloy7ab412d2018-11-10 17:30:24 -05002196 break;
2197 }
Tuong Lien91986ee2019-02-11 13:29:43 +07002198
2199 /* If this endpoint was re-created while peer was ESTABLISHING
2200 * it doesn't know current session number. Force re-synch.
2201 */
2202 if (mtyp == ACTIVATE_MSG && msg_dest_session_valid(hdr) &&
2203 l->session != msg_dest_session(hdr)) {
2204 if (less(l->session, msg_dest_session(hdr)))
2205 l->session = msg_dest_session(hdr) + 1;
2206 break;
2207 }
2208
Jon Maloy7ab412d2018-11-10 17:30:24 -05002209 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
2210 if (mtyp == RESET_MSG || !link_is_up(l))
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002211 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
2212
2213 /* ACTIVATE_MSG takes up link if it was already locally reset */
Jon Maloy7ab412d2018-11-10 17:30:24 -05002214 if (mtyp == ACTIVATE_MSG && l->state == LINK_ESTABLISHING)
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002215 rc = TIPC_LINK_UP_EVT;
2216
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002217 l->peer_session = msg_session(hdr);
Jon Maloy7ea817f2018-07-10 01:07:36 +02002218 l->in_session = true;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002219 l->peer_bearer_id = msg_bearer_id(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002220 if (l->mtu > msg_max_pkt(hdr))
2221 l->mtu = msg_max_pkt(hdr);
2222 break;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04002223
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002224 case STATE_MSG:
Jon Maloy9012de52018-07-10 01:07:35 +02002225 l->rcv_nxt_state = msg_seqno(hdr) + 1;
2226
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002227 /* Update own tolerance if peer indicates a non-zero value */
Jon Maloy047491e2018-10-10 17:34:01 +02002228 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL)) {
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002229 l->tolerance = peers_tol;
Jon Maloy047491e2018-10-10 17:34:01 +02002230 l->bc_rcvlink->tolerance = peers_tol;
2231 }
Jon Paul Maloyf7967552016-11-23 21:05:26 -05002232 /* Update own prio if peer indicates a different value */
2233 if ((peers_prio != l->priority) &&
2234 in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
Richard Alpe81729812016-02-01 08:19:57 +01002235 l->priority = peers_prio;
2236 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
2237 }
2238
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002239 l->silent_intv_cnt = 0;
2240 l->stats.recv_states++;
2241 if (msg_probe(hdr))
2242 l->stats.recv_probes++;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002243
2244 if (!link_is_up(l)) {
2245 if (l->state == LINK_ESTABLISHING)
2246 rc = TIPC_LINK_UP_EVT;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002247 break;
Jon Paul Maloy73f646c2015-10-15 14:52:44 -04002248 }
Tuong Lien91959482019-04-04 11:09:51 +07002249
2250 /* Receive Gap ACK blocks from peer if any */
Tuong Liend7626b52020-05-26 16:38:34 +07002251 glen = tipc_get_gap_ack_blks(&ga, l, hdr, true);
Tuong Lien91959482019-04-04 11:09:51 +07002252
2253 tipc_mon_rcv(l->net, data + glen, dlen - glen, l->addr,
Jon Paul Maloy35c55c92016-06-13 20:46:22 -04002254 &l->mon_state, l->bearer_id);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002255
Jon Paul Maloy662921c2015-07-30 18:24:21 -04002256 /* Send NACK if peer has sent pkts we haven't received yet */
Jon Maloyd3b09992019-12-10 00:52:45 +01002257 if ((reply || msg_is_keepalive(hdr)) &&
2258 more(peers_snd_nxt, rcv_nxt) &&
2259 !tipc_link_is_synching(l) &&
2260 skb_queue_empty(&l->deferdq))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002261 rcvgap = peers_snd_nxt - l->rcv_nxt;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01002262 if (rcvgap || reply)
2263 tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
2264 rcvgap, 0, 0, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002265
Tuong Liend7626b52020-05-26 16:38:34 +07002266 released = tipc_link_advance_transmq(l, l, ack, gap, ga, xmitq,
2267 &retransmitted, &rc);
Tuong Lien91959482019-04-04 11:09:51 +07002268 if (gap)
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002269 l->stats.recv_nacks++;
Tuong Liend7626b52020-05-26 16:38:34 +07002270 if (released || retransmitted)
2271 tipc_link_update_cwin(l, released, retransmitted);
2272 if (released)
2273 tipc_link_advance_backlog(l, xmitq);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04002274 if (unlikely(!skb_queue_empty(&l->wakeupq)))
2275 link_prepare_wakeup(l);
2276 }
2277exit:
2278 kfree_skb(skb);
2279 return rc;
2280}
2281
Jon Paul Maloy52666982015-10-22 08:51:41 -04002282/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
2283 */
2284static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
2285 u16 peers_snd_nxt,
2286 struct sk_buff_head *xmitq)
2287{
2288 struct sk_buff *skb;
2289 struct tipc_msg *hdr;
2290 struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
2291 u16 ack = l->rcv_nxt - 1;
2292 u16 gap_to = peers_snd_nxt - 1;
2293
2294 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
Jon Paul Maloye74a3862016-03-03 14:23:21 -05002295 0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002296 if (!skb)
2297 return false;
2298 hdr = buf_msg(skb);
2299 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
2300 msg_set_bcast_ack(hdr, ack);
2301 msg_set_bcgap_after(hdr, ack);
2302 if (dfrd_skb)
2303 gap_to = buf_seqno(dfrd_skb) - 1;
2304 msg_set_bcgap_to(hdr, gap_to);
2305 msg_set_non_seq(hdr, bcast);
2306 __skb_queue_tail(xmitq, skb);
2307 return true;
2308}
2309
2310/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
2311 *
2312 * Give a newly added peer node the sequence number where it should
2313 * start receiving and acking broadcast packets.
2314 */
Wu Fengguang742e0382015-10-24 22:56:01 +08002315static void tipc_link_build_bc_init_msg(struct tipc_link *l,
2316 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04002317{
2318 struct sk_buff_head list;
2319
2320 __skb_queue_head_init(&list);
2321 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
2322 return;
Jon Paul Maloy06bd2b12016-10-27 18:51:55 -04002323 msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
Jon Paul Maloy52666982015-10-22 08:51:41 -04002324 tipc_link_xmit(l, &list, xmitq);
2325}
2326
2327/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
2328 */
2329void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
2330{
2331 int mtyp = msg_type(hdr);
2332 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
2333
2334 if (link_is_up(l))
2335 return;
2336
2337 if (msg_user(hdr) == BCAST_PROTOCOL) {
2338 l->rcv_nxt = peers_snd_nxt;
2339 l->state = LINK_ESTABLISHED;
2340 return;
2341 }
2342
2343 if (l->peer_caps & TIPC_BCAST_SYNCH)
2344 return;
2345
2346 if (msg_peer_node_is_up(hdr))
2347 return;
2348
2349 /* Compatibility: accept older, less safe initial synch data */
2350 if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
2351 l->rcv_nxt = peers_snd_nxt;
2352}
2353
2354/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
2355 */
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002356int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
2357 struct sk_buff_head *xmitq)
Jon Paul Maloy52666982015-10-22 08:51:41 -04002358{
2359 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002360 int rc = 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002361
2362 if (!link_is_up(l))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002363 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002364
2365 if (!msg_peer_node_is_up(hdr))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002366 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002367
Jon Paul Maloy2d18ac42016-07-11 16:08:35 -04002368	/* Open when peer acknowledges our bcast init msg (pkt #1) */
2369 if (msg_ack(hdr))
2370 l->bc_peer_is_up = true;
2371
2372 if (!l->bc_peer_is_up)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002373 return rc;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002374
2375 /* Ignore if peers_snd_nxt goes beyond receive window */
2376 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002377 return rc;
2378
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002379 l->snd_nxt = peers_snd_nxt;
2380 if (link_bc_rcv_gap(l))
2381 rc |= TIPC_LINK_SND_STATE;
2382
2383 /* Return now if sender supports nack via STATE messages */
2384 if (l->peer_caps & TIPC_BCAST_STATE_NACK)
2385 return rc;
2386
2387 /* Otherwise, be backwards compatible */
Jon Paul Maloy52666982015-10-22 08:51:41 -04002388
2389 if (!more(peers_snd_nxt, l->rcv_nxt)) {
2390 l->nack_state = BC_NACK_SND_CONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002391 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002392 }
2393
2394 /* Don't NACK if one was recently sent or peeked */
2395 if (l->nack_state == BC_NACK_SND_SUPPRESS) {
2396 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002397 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002398 }
2399
2400 /* Conditionally delay NACK sending until next synch rcv */
2401 if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
2402 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
2403 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002404 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002405 }
2406
2407 /* Send NACK now but suppress next one */
2408 tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
2409 l->nack_state = BC_NACK_SND_SUPPRESS;
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04002410 return 0;
Jon Paul Maloy52666982015-10-22 08:51:41 -04002411}
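
/* Illustrative sketch, not part of the kernel source: the backwards-
 * compatible NACK damping at the end of tipc_link_bc_sync_rcv() as a
 * little state machine. 'gap' is peers_snd_nxt - rcv_nxt; min_win stands
 * in for TIPC_MIN_LINK_WIN.
 */
#include <stdbool.h>
#include <stdint.h>

enum nack_model { N_CONDITIONAL, N_UNCONDITIONAL, N_SUPPRESS };

static bool bc_nack_now(enum nack_model *st, uint16_t gap, uint16_t min_win)
{
	if (!gap) {                 /* peer has sent nothing we lack */
		*st = N_CONDITIONAL;
		return false;
	}
	if (*st == N_SUPPRESS) {    /* a NACK went out very recently */
		*st = N_UNCONDITIONAL;
		return false;
	}
	if (*st == N_CONDITIONAL) { /* small gaps may wait one more synch */
		*st = N_UNCONDITIONAL;
		if (gap < min_win)
			return false;
	}
	*st = N_SUPPRESS;           /* send now, damp the next occasion */
	return true;
}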
int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap,
			 struct tipc_gap_ack_blks *ga,
			 struct sk_buff_head *xmitq,
			 struct sk_buff_head *retrq)
{
	struct tipc_link *l = r->bc_sndlink;
	bool unused = false;
	int rc = 0;

	if (!link_is_up(r) || !r->bc_peer_is_up)
		return 0;

	if (gap) {
		l->stats.recv_nacks++;
		r->stats.recv_nacks++;
	}

	if (less(acked, r->acked) || (acked == r->acked && !gap && !ga))
		return 0;

	trace_tipc_link_bc_ack(r, acked, gap, &l->transmq);
	tipc_link_advance_transmq(l, r, acked, gap, ga, retrq, &unused, &rc);

	tipc_link_advance_backlog(l, xmitq);
	if (unlikely(!skb_queue_empty(&l->wakeupq)))
		link_prepare_wakeup(l);

	return rc;
}

/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is here for backwards compatibility, since
 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		rc = tipc_link_bc_ack_rcv(l, acked, to - acked, NULL, xmitq,
					  xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}
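
/* tipc_link_set_queue_limits(): set the link's send window bounds and
 * derive per-importance backlog limits from the minimum window. System
 * importance traffic is instead capped by how many messages a full bulk
 * of publication items needs at the link's MTU.
 */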
void tipc_link_set_queue_limits(struct tipc_link *l, u32 min_win, u32 max_win)
{
	int max_bulk = TIPC_MAX_PUBL / (l->mtu / ITEM_SIZE);

	l->min_win = min_win;
	l->ssthresh = max_win;
	l->max_win = max_win;
	l->window = min_win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = min_win * 2;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = min_win * 4;
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = min_win * 6;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = min_win * 8;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
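
/* Editor's sketch (#if 0, standalone): a worked example of the backlog
 * limit computation above. The MTU and window values are illustrative
 * only, and the TIPC_MAX_PUBL and ITEM_SIZE values below are assumed
 * stand-ins for the kernel constants.
 */
#if 0
#include <stdio.h>

#define TIPC_MAX_PUBL	65535	/* assumed value, for illustration */
#define ITEM_SIZE	20	/* assumed publication item size */

int main(void)
{
	unsigned int mtu = 1500, min_win = 50;
	int max_bulk = TIPC_MAX_PUBL / (mtu / ITEM_SIZE);

	/* Low..critical importance scale linearly with the minimum window */
	printf("low %u med %u high %u crit %u sys %d\n",
	       min_win * 2, min_win * 4, min_win * 6, min_win * 8, max_bulk);
	/* -> low 100 med 200 high 300 crit 400 sys 873 */
	return 0;
}
#endif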

/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested_deprecated(props, TIPC_NLA_PROP_MAX, prop,
					  tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 max_win;

		max_win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if (max_win < TIPC_DEF_LINK_WIN || max_win > TIPC_MAX_LINK_WIN)
			return -EINVAL;
	}

	return 0;
}
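
/* Editor's sketch (#if 0, standalone userspace, linked against libnl-3's
 * genl library): one hedged way to exercise the validation above is a
 * TIPC_NL_LINK_SET request carrying a nested TIPC_NLA_LINK_PROP. Error
 * handling is elided, and the family name "TIPCv2" is assumed to match
 * TIPC_GENL_V2_NAME from the uapi headers.
 */
#if 0
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/tipc_netlink.h>

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	struct nl_msg *msg = nlmsg_alloc();
	struct nlattr *link, *prop;
	int family;

	genl_connect(sk);
	family = genl_ctrl_resolve(sk, "TIPCv2");

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    TIPC_NL_LINK_SET, 1);
	link = nla_nest_start(msg, TIPC_NLA_LINK);
	nla_put_string(msg, TIPC_NLA_LINK_NAME, "broadcast-link");
	prop = nla_nest_start(msg, TIPC_NLA_LINK_PROP);
	nla_put_u32(msg, TIPC_NLA_PROP_TOL, 1500);	/* within min/max */
	nla_nest_end(msg, prop);
	nla_nest_end(msg, link);

	nl_send_auto(sk, msg);	/* kernel replies -EINVAL if out of range */
	nlmsg_free(msg);
	nl_socket_free(sk);
	return 0;
}
#endif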

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}
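
/* Editor's sketch (#if 0, standalone): the key/value map used above is a
 * common table-driven pattern for emitting many attributes through one
 * loop. This miniature version uses made-up keys and a stub in place of
 * a netlink message.
 */
#if 0
#include <stdio.h>

struct kv_map {
	unsigned int key;
	unsigned int val;
};

static int put_u32(unsigned int key, unsigned int val)
{
	printf("attr %u = %u\n", key, val);
	return 0;	/* a real nla_put_u32() returns nonzero when full */
}

int main(void)
{
	struct kv_map map[] = {
		{1, 100},	/* e.g. RX packet count */
		{2, 200},	/* e.g. TX packet count */
	};
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (put_u32(map[i].key, map[i].val))
			return -1;	/* mirrors the goto msg_full unwind */
	return 0;
}
#endif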

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	u32 self = tipc_own_addr(net);
	struct nlattr *attrs;
	struct nlattr *prop;
	void *hdr;
	int err;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, tipc_cluster_mask(self)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, link->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}
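
/* Editor's sketch (#if 0, standalone): the cascading labels above
 * (prop_msg_full -> attr_msg_full -> msg_full) are the usual goto-unwind
 * idiom: each label cancels exactly the nesting levels opened so far, so
 * a failure at any depth leaves the message buffer unchanged. A minimal
 * model with stubbed begin/cancel calls:
 */
#if 0
#include <stdio.h>

static int begin(const char *what) { printf("begin %s\n", what); return 1; }
static void cancel(const char *what) { printf("cancel %s\n", what); }
static int fail_here;	/* injected failure point for the demo */

static int build(void)
{
	if (!begin("hdr"))
		return -1;
	if (!begin("attrs") || fail_here == 1)
		goto msg_full;
	if (!begin("prop"))
		goto attr_msg_full;
	if (fail_here == 2)
		goto prop_msg_full;
	/* ... fill properties, end nests, return 0 on success ... */
	return 0;

prop_msg_full:
	cancel("prop");
attr_msg_full:
	cancel("attrs");
msg_full:
	cancel("hdr");
	return -1;
}

int main(void)
{
	fail_here = 2;
	return build();	/* cancels prop, attrs, hdr - in that order */
}
#endif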

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start_noflag(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg,
			struct tipc_link *bcl)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	u32 bc_mode = tipc_bcast_get_broadcast_mode(net);
	u32 bc_ratio = tipc_bcast_get_broadcast_ratio(net);

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start_noflag(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->max_win))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST, bc_mode))
		goto prop_msg_full;
	if (bc_mode & BCLINK_MODE_SEL)
		if (nla_put_u32(msg->skb, TIPC_NLA_PROP_BROADCAST_RATIO,
				bc_ratio))
			goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (l->bc_rcvlink)
		l->bc_rcvlink->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
			struct sk_buff_head *xmitq)
{
	l->priority = prio;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
	l->abort_limit = limit;
}

/**
 * tipc_link_dump - dump TIPC link data
 * @l: tipc link to be dumped
 * @dqueues: bitmask selecting which link queues to dump:
 *           - TIPC_DUMP_NONE: don't dump link queues
 *           - TIPC_DUMP_TRANSMQ: dump link transmq queue
 *           - TIPC_DUMP_BACKLOGQ: dump link backlog queue
 *           - TIPC_DUMP_DEFERDQ: dump link deferred queue (deferdq)
 *           - TIPC_DUMP_INPUTQ: dump link input queue
 *           - TIPC_DUMP_WAKEUP: dump link wakeup queue
 *           - TIPC_DUMP_ALL: dump all the link queues above
 * @buf: buffer where the formatted dump data is written
 * Return: the number of characters written to @buf
 */
int tipc_link_dump(struct tipc_link *l, u16 dqueues, char *buf)
{
	int i = 0;
	size_t sz = (dqueues) ? LINK_LMAX : LINK_LMIN;
	struct sk_buff_head *list;
	struct sk_buff *hskb, *tskb;
	u32 len;

	if (!l) {
		i += scnprintf(buf, sz, "link data: (null)\n");
		return i;
	}

	i += scnprintf(buf, sz, "link data: %x", l->addr);
	i += scnprintf(buf + i, sz - i, " %x", l->state);
	i += scnprintf(buf + i, sz - i, " %u", l->in_session);
	i += scnprintf(buf + i, sz - i, " %u", l->session);
	i += scnprintf(buf + i, sz - i, " %u", l->peer_session);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt);
	i += scnprintf(buf + i, sz - i, " %u", l->snd_nxt_state);
	i += scnprintf(buf + i, sz - i, " %u", l->rcv_nxt_state);
	i += scnprintf(buf + i, sz - i, " %x", l->peer_caps);
	i += scnprintf(buf + i, sz - i, " %u", l->silent_intv_cnt);
	i += scnprintf(buf + i, sz - i, " %u", l->rst_cnt);
	i += scnprintf(buf + i, sz - i, " %u", 0);
	i += scnprintf(buf + i, sz - i, " %u", 0);
	i += scnprintf(buf + i, sz - i, " %u", l->acked);

	list = &l->transmq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->deferdq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = &l->backlogq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	list = l->inputq;
	len = skb_queue_len(list);
	hskb = skb_peek(list);
	tskb = skb_peek_tail(list);
	i += scnprintf(buf + i, sz - i, " | %u %u %u\n", len,
		       (hskb) ? msg_seqno(buf_msg(hskb)) : 0,
		       (tskb) ? msg_seqno(buf_msg(tskb)) : 0);

	if (dqueues & TIPC_DUMP_TRANSMQ) {
		i += scnprintf(buf + i, sz - i, "transmq: ");
		i += tipc_list_dump(&l->transmq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_BACKLOGQ) {
		i += scnprintf(buf + i, sz - i,
			       "backlogq: <%u %u %u %u %u>, ",
			       l->backlog[TIPC_LOW_IMPORTANCE].len,
			       l->backlog[TIPC_MEDIUM_IMPORTANCE].len,
			       l->backlog[TIPC_HIGH_IMPORTANCE].len,
			       l->backlog[TIPC_CRITICAL_IMPORTANCE].len,
			       l->backlog[TIPC_SYSTEM_IMPORTANCE].len);
		i += tipc_list_dump(&l->backlogq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_DEFERDQ) {
		i += scnprintf(buf + i, sz - i, "deferdq: ");
		i += tipc_list_dump(&l->deferdq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_INPUTQ) {
		i += scnprintf(buf + i, sz - i, "inputq: ");
		i += tipc_list_dump(l->inputq, false, buf + i);
	}
	if (dqueues & TIPC_DUMP_WAKEUP) {
		i += scnprintf(buf + i, sz - i, "wakeup: ");
		i += tipc_list_dump(&l->wakeupq, false, buf + i);
	}

	return i;
}
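
/* Editor's sketch (#if 0, standalone): the `i += scnprintf(buf + i, sz - i,
 * ...)` accumulation used throughout tipc_link_dump() is truncation-safe
 * because scnprintf() returns the number of characters actually written,
 * never more than the space given. Userspace has no scnprintf(), so the
 * model below builds an equivalent from vsnprintf().
 */
#if 0
#include <stdarg.h>
#include <stdio.h>

static int scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int n;

	if (!size)
		return 0;
	va_start(args, fmt);
	n = vsnprintf(buf, size, fmt, args);
	va_end(args);
	/* vsnprintf reports the would-be length; clamp to what fits */
	return n >= (int)size ? (int)size - 1 : n;
}

int main(void)
{
	char buf[16];
	int i = 0;

	i += scnprintf(buf + i, sizeof(buf) - i, "link data: %x", 0x1001);
	i += scnprintf(buf + i, sizeof(buf) - i, " %u", 42);	/* truncated */
	printf("%d '%s'\n", i, buf);	/* i never exceeds sizeof(buf) - 1 */
	return 0;
}
#endif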