/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "subscr.h"
#include "link.h"
#include "bcast.h"
#include "socket.h"
#include "name_distr.h"
#include "discover.h"
#include "netlink.h"
#include "monitor.h"

#include <linux/pkt_sched.h>

struct tipc_stats {
	u32 sent_pkts;
	u32 recv_pkts;
	u32 sent_states;
	u32 recv_states;
	u32 sent_probes;
	u32 recv_probes;
	u32 sent_nacks;
	u32 recv_nacks;
	u32 sent_acks;
	u32 sent_bundled;
	u32 sent_bundles;
	u32 recv_bundled;
	u32 recv_bundles;
	u32 retransmitted;
	u32 sent_fragmented;
	u32 sent_fragments;
	u32 recv_fragmented;
	u32 recv_fragments;
	u32 link_congs;		/* # port sends blocked by congestion */
	u32 deferred_recv;
	u32 duplicates;
	u32 max_queue_sz;	/* send queue size high water mark */
	u32 accu_queue_sz;	/* used for send queue size profiling */
	u32 queue_sz_counts;	/* used for send queue size profiling */
	u32 msg_length_counts;	/* used for message length profiling */
	u32 msg_lengths_total;	/* used for message length profiling */
	u32 msg_length_profile[7]; /* used for msg. length profiling */
};

/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @timer: link timer
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @last_retransmitted: sequence number of most recently retransmitted message
 * @stale_count: # of identical retransmit requests made by peer
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: # last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saved OOS b'cast message received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct net *net;

	/* Management and link supervision data */
	u32 peer_session;
	u32 session;
	u32 peer_bearer_id;
	u32 bearer_id;
	u32 tolerance;
	u32 abort_limit;
	u32 state;
	u16 peer_caps;
	bool active;
	u32 silent_intv_cnt;
	char if_name[TIPC_MAX_IF_NAME];
	u32 priority;
	char net_plane;
	struct tipc_mon_state mon_state;
	u16 rst_cnt;

	/* Failover/synch */
	u16 drop_point;
	struct sk_buff *failover_reasm_skb;

	/* Max packet negotiation */
	u16 mtu;
	u16 advertised_mtu;

	/* Sending */
	struct sk_buff_head transmq;
	struct sk_buff_head backlogq;
	struct {
		u16 len;
		u16 limit;
	} backlog[5];
	u16 snd_nxt;
	u16 last_retransm;
	u16 window;
	u32 stale_count;

	/* Reception */
	u16 rcv_nxt;
	u32 rcv_unacked;
	struct sk_buff_head deferdq;
	struct sk_buff_head *inputq;
	struct sk_buff_head *namedq;

	/* Congestion handling */
	struct sk_buff_head wakeupq;

	/* Fragmentation/reassembly */
	struct sk_buff *reasm_buf;

	/* Broadcast */
	u16 ackers;
	u16 acked;
	struct tipc_link *bc_rcvlink;
	struct tipc_link *bc_sndlink;
	unsigned long prev_retr;
	u16 prev_from;
	u16 prev_to;
	u8 nack_state;
	bool bc_peer_is_up;

	/* Statistics */
	struct tipc_stats stats;
};

/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
	BC_NACK_SND_CONDITIONAL,
	BC_NACK_SND_UNCONDITIONAL,
	BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIMIT 10	/* [ms] */

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
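
/* As used in tipc_link_build_nack_msg() below: a NACK goes out when the
 * first out-of-order packet is deferred, and after that only for every
 * TIPC_NACK_INTV deferred packets, so a long sequence gap does not
 * trigger one NACK per arriving packet.
 */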

/* Wildcard value for link session numbers. When it is known that
 * peer endpoint is down, any session number must be accepted.
 */
#define ANY_SESSION 0x10000

/* Link FSM states:
 */
enum {
	LINK_ESTABLISHED	= 0xe,
	LINK_ESTABLISHING	= 0xe  << 4,
	LINK_RESET		= 0x1  << 8,
	LINK_RESETTING		= 0x2  << 12,
	LINK_PEER_RESET		= 0xd  << 16,
	LINK_FAILINGOVER	= 0xf  << 20,
	LINK_SYNCHING		= 0xc  << 24
};
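
/* Each state above occupies its own four-bit field of the state word, so
 * no two states share any set bits. This lets the predicates below test
 * membership of a set of states with a single mask, e.g.:
 *
 *	l->state & (LINK_ESTABLISHED | LINK_SYNCHING)
 *
 * instead of comparing against each state in turn.
 */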

/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
	return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}

static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);

/*
 * Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
	return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
	return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
	return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
	return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
	return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
	return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
	return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
	return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
	return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}

int tipc_link_is_active(struct tipc_link *l)
{
	return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
	l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
	return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
	return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
	return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
	return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
	return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
	return l->net_plane;
}

void tipc_link_add_bc_peer(struct tipc_link *snd_l,
			   struct tipc_link *uc_l,
			   struct sk_buff_head *xmitq)
{
	struct tipc_link *rcv_l = uc_l->bc_rcvlink;

	snd_l->ackers++;
	rcv_l->acked = snd_l->snd_nxt - 1;
	snd_l->state = LINK_ESTABLISHED;
	tipc_link_build_bc_init_msg(uc_l, xmitq);
}

void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
			      struct tipc_link *rcv_l,
			      struct sk_buff_head *xmitq)
{
	u16 ack = snd_l->snd_nxt - 1;

	snd_l->ackers--;
	rcv_l->bc_peer_is_up = true;
	rcv_l->state = LINK_ESTABLISHED;
	tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
	tipc_link_reset(rcv_l);
	rcv_l->state = LINK_RESET;
	if (!snd_l->ackers) {
		tipc_link_reset(snd_l);
		snd_l->state = LINK_RESET;
		__skb_queue_purge(xmitq);
	}
}

int tipc_link_bc_peers(struct tipc_link *l)
{
	return l->ackers;
}

u16 link_bc_rcv_gap(struct tipc_link *l)
{
	struct sk_buff *skb = skb_peek(&l->deferdq);
	u16 gap = 0;

	if (more(l->snd_nxt, l->rcv_nxt))
		gap = l->snd_nxt - l->rcv_nxt;
	if (skb)
		gap = buf_seqno(skb) - l->rcv_nxt;
	return gap;
}
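
/* In a broadcast receive link, snd_nxt mirrors the peer's send position
 * (see tipc_link_build_state_msg()), so the gap above is first estimated
 * from snd_nxt - rcv_nxt and then refined from the sequence number of the
 * oldest deferred packet, when one is queued.
 */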

void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
	l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
	return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
	return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
	return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
	return l->name;
}

/**
 * tipc_link_create - create a new link
 * @net: pointer to associated network namespace
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A,B,c..) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
		      int tolerance, char net_plane, u32 mtu, int priority,
		      int window, u32 session, u32 ownnode, u32 peer,
		      u16 peer_caps,
		      struct tipc_link *bc_sndlink,
		      struct tipc_link *bc_rcvlink,
		      struct sk_buff_head *inputq,
		      struct sk_buff_head *namedq,
		      struct tipc_link **link)
{
	struct tipc_link *l;

	l = kzalloc(sizeof(*l), GFP_ATOMIC);
	if (!l)
		return false;
	*link = l;
	l->session = session;

	/* Note: peer i/f name is completed by reset/activate message */
	sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
		tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
		if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
	strcpy(l->if_name, if_name);
	l->addr = peer;
	l->peer_caps = peer_caps;
	l->net = net;
	l->peer_session = ANY_SESSION;
	l->bearer_id = bearer_id;
	l->tolerance = tolerance;
	l->net_plane = net_plane;
	l->advertised_mtu = mtu;
	l->mtu = mtu;
	l->priority = priority;
	tipc_link_set_queue_limits(l, window);
	l->ackers = 1;
	l->bc_sndlink = bc_sndlink;
	l->bc_rcvlink = bc_rcvlink;
	l->inputq = inputq;
	l->namedq = namedq;
	l->state = LINK_RESETTING;
	__skb_queue_head_init(&l->transmq);
	__skb_queue_head_init(&l->backlogq);
	__skb_queue_head_init(&l->deferdq);
	skb_queue_head_init(&l->wakeupq);
	skb_queue_head_init(l->inputq);
	return true;
}

/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @net: pointer to associated network namespace
 * @mtu: mtu to be used initially if no peers
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
			 int mtu, int window, u16 peer_caps,
			 struct sk_buff_head *inputq,
			 struct sk_buff_head *namedq,
			 struct tipc_link *bc_sndlink,
			 struct tipc_link **link)
{
	struct tipc_link *l;

	if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
			      0, ownnode, peer, peer_caps, bc_sndlink,
			      NULL, inputq, namedq, link))
		return false;

	l = *link;
	strcpy(l->name, tipc_bclink_name);
	tipc_link_reset(l);
	l->state = LINK_RESET;
	l->ackers = 0;
	l->bc_rcvlink = l;

	/* Broadcast send link is always up */
	if (link_is_bc_sndlink(l))
		l->state = LINK_ESTABLISHED;

	/* Disable replicast if even a single peer doesn't support it */
	if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
		tipc_bcast_disable_rcast(net);

	return true;
}

/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
	int rc = 0;

	switch (l->state) {
	case LINK_RESETTING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_RESET:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			/* fall through */
		case LINK_FAILURE_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_PEER_RESET:
		switch (evt) {
		case LINK_RESET_EVT:
			l->state = LINK_ESTABLISHING;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_FAILINGOVER:
		switch (evt) {
		case LINK_FAILOVER_END_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_PEER_RESET_EVT:
		case LINK_RESET_EVT:
		case LINK_ESTABLISH_EVT:
		case LINK_FAILURE_EVT:
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHING:
		switch (evt) {
		case LINK_ESTABLISH_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
			l->state = LINK_FAILINGOVER;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_FAILURE_EVT:
		case LINK_PEER_RESET_EVT:
		case LINK_SYNCH_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
			break;
		case LINK_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_ESTABLISHED:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_END_EVT:
			break;
		case LINK_SYNCH_BEGIN_EVT:
			l->state = LINK_SYNCHING;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case LINK_SYNCHING:
		switch (evt) {
		case LINK_PEER_RESET_EVT:
			l->state = LINK_PEER_RESET;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_FAILURE_EVT:
			l->state = LINK_RESETTING;
			rc |= TIPC_LINK_DOWN_EVT;
			break;
		case LINK_RESET_EVT:
			l->state = LINK_RESET;
			break;
		case LINK_ESTABLISH_EVT:
		case LINK_SYNCH_BEGIN_EVT:
			break;
		case LINK_SYNCH_END_EVT:
			l->state = LINK_ESTABLISHED;
			break;
		case LINK_FAILOVER_BEGIN_EVT:
		case LINK_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
	}
	return rc;
illegal_evt:
	pr_err("Illegal FSM event %x in state %x on link %s\n",
	       evt, l->state, l->name);
	return rc;
}

/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
	struct sk_buff *skb;
	struct tipc_msg *msg;
	int length;

	/* Update counters used in statistical profiling of send traffic */
	l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
	l->stats.queue_sz_counts++;

	skb = skb_peek(&l->transmq);
	if (!skb)
		return;
	msg = buf_msg(skb);
	length = msg_size(msg);

	if (msg_user(msg) == MSG_FRAGMENTER) {
		if (msg_type(msg) != FIRST_FRAGMENT)
			return;
		length = msg_size(msg_get_wrapped(msg));
	}
	l->stats.msg_lengths_total += length;
	l->stats.msg_length_counts++;
	if (length <= 64)
		l->stats.msg_length_profile[0]++;
	else if (length <= 256)
		l->stats.msg_length_profile[1]++;
	else if (length <= 1024)
		l->stats.msg_length_profile[2]++;
	else if (length <= 4096)
		l->stats.msg_length_profile[3]++;
	else if (length <= 16384)
		l->stats.msg_length_profile[4]++;
	else if (length <= 32768)
		l->stats.msg_length_profile[5]++;
	else
		l->stats.msg_length_profile[6]++;
}
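
/* The seven msg_length_profile[] buckets above thus cover message sizes
 * 0-64, 65-256, 257-1024, 1025-4096, 4097-16384, 16385-32768 and >32768
 * bytes.
 */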

/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = 0;
	int rc = 0;
	bool state = false;
	bool probe = false;
	bool setup = false;
	u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
	u16 bc_acked = l->bc_rcvlink->acked;
	struct tipc_mon_state *mstate = &l->mon_state;

	switch (l->state) {
	case LINK_ESTABLISHED:
	case LINK_SYNCHING:
		mtyp = STATE_MSG;
		link_profile_stats(l);
		tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
		if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		state = bc_acked != bc_snt;
		state |= l->bc_rcvlink->rcv_unacked;
		state |= l->rcv_unacked;
		state |= !skb_queue_empty(&l->transmq);
		state |= !skb_queue_empty(&l->deferdq);
		probe = mstate->probing;
		probe |= l->silent_intv_cnt;
		if (probe || mstate->monitoring)
			l->silent_intv_cnt++;
		break;
	case LINK_RESET:
		setup = l->rst_cnt++ <= 4;
		setup |= !(l->rst_cnt % 16);
		mtyp = RESET_MSG;
		break;
	case LINK_ESTABLISHING:
		setup = true;
		mtyp = ACTIVATE_MSG;
		break;
	case LINK_PEER_RESET:
	case LINK_RESETTING:
	case LINK_FAILINGOVER:
		break;
	default:
		break;
	}

	if (state || probe || setup)
		tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, 0, xmitq);

	return rc;
}
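
/* Note the implicit backoff in the LINK_RESET case above: a RESET message
 * goes out on each of the first five timer intervals, and after that only
 * on every sixteenth interval, so a dead peer is not flooded with resets.
 */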

/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @l: congested link
 * @hdr: header of message that is being sent
 * Create pseudo msg to send back to user when congestion abates
 */
static int link_schedule_user(struct tipc_link *l, struct tipc_msg *hdr)
{
	u32 dnode = tipc_own_addr(l->net);
	u32 dport = msg_origport(hdr);
	struct sk_buff *skb;

	/* Create and schedule wakeup pseudo message */
	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
			      dnode, l->addr, dport, 0, 0);
	if (!skb)
		return -ENOBUFS;
	msg_set_dest_droppable(buf_msg(skb), true);
	TIPC_SKB_CB(skb)->chain_imp = msg_importance(hdr);
	skb_queue_tail(&l->wakeupq, skb);
	l->stats.link_congs++;
	return -ELINKCONG;
}
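
/* The SOCK_WAKEUP pseudo message created above is addressed back to the
 * blocked sender (dnode/dport are taken from the message that could not
 * be sent) and parked on l->wakeupq. It carries the blocked message's
 * importance in chain_imp, which link_prepare_wakeup() below uses to
 * decide when the backlog has drained enough to wake that sender up.
 */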

/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @l: congested link
 * Wake up a number of waiting users, as permitted by available space
 * in the send queue
 */
void link_prepare_wakeup(struct tipc_link *l)
{
	struct sk_buff *skb, *tmp;
	int imp, i = 0;

	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
		imp = TIPC_SKB_CB(skb)->chain_imp;
		if (l->backlog[imp].len < l->backlog[imp].limit) {
			skb_unlink(skb, &l->wakeupq);
			skb_queue_tail(l->inputq, skb);
		} else if (i++ > 10) {
			break;
		}
	}
}
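
/* A wakeup message is released only while the backlog level for its own
 * importance is below its limit, and the walk gives up after finding more
 * than ten still-blocked messages, which bounds the work done per call.
 */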

void tipc_link_reset(struct tipc_link *l)
{
	l->peer_session = ANY_SESSION;
	l->session++;
	l->mtu = l->advertised_mtu;
	__skb_queue_purge(&l->transmq);
	__skb_queue_purge(&l->deferdq);
	skb_queue_splice_init(&l->wakeupq, l->inputq);
	__skb_queue_purge(&l->backlogq);
	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
	kfree_skb(l->reasm_buf);
	kfree_skb(l->failover_reasm_skb);
	l->reasm_buf = NULL;
	l->failover_reasm_skb = NULL;
	l->rcv_unacked = 0;
	l->snd_nxt = 1;
	l->rcv_nxt = 1;
	l->acked = 0;
	l->silent_intv_cnt = 0;
	l->rst_cnt = 0;
	l->stale_count = 0;
	l->bc_peer_is_up = false;
	memset(&l->mon_state, 0, sizeof(l->mon_state));
	tipc_link_reset_stats(l);
}

/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @l: link to use
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
		   struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb_peek(list));
	unsigned int maxwin = l->window;
	int imp = msg_importance(hdr);
	unsigned int mtu = l->mtu;
	u16 ack = l->rcv_nxt - 1;
	u16 seqno = l->snd_nxt;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
	struct sk_buff_head *transmq = &l->transmq;
	struct sk_buff_head *backlogq = &l->backlogq;
	struct sk_buff *skb, *_skb, *bskb;
	int pkt_cnt = skb_queue_len(list);
	int rc = 0;

	if (unlikely(msg_size(hdr) > mtu)) {
		skb_queue_purge(list);
		return -EMSGSIZE;
	}

	/* Allow oversubscription of one data msg per source at congestion */
	if (unlikely(l->backlog[imp].len >= l->backlog[imp].limit)) {
		if (imp == TIPC_SYSTEM_IMPORTANCE) {
			pr_warn("%s<%s>, link overflow", link_rst_msg, l->name);
			return -ENOBUFS;
		}
		rc = link_schedule_user(l, hdr);
	}

	if (pkt_cnt > 1) {
		l->stats.sent_fragmented++;
		l->stats.sent_fragments += pkt_cnt;
	}

	/* Prepare each packet for sending, and add to relevant queue: */
	while (skb_queue_len(list)) {
		skb = skb_peek(list);
		hdr = buf_msg(skb);
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);

		if (likely(skb_queue_len(transmq) < maxwin)) {
			_skb = skb_clone(skb, GFP_ATOMIC);
			if (!_skb) {
				skb_queue_purge(list);
				return -ENOBUFS;
			}
			__skb_dequeue(list);
			__skb_queue_tail(transmq, skb);
			__skb_queue_tail(xmitq, _skb);
			TIPC_SKB_CB(skb)->ackers = l->ackers;
			l->rcv_unacked = 0;
			l->stats.sent_pkts++;
			seqno++;
			continue;
		}
		if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
			kfree_skb(__skb_dequeue(list));
			l->stats.sent_bundled++;
			continue;
		}
		if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
			kfree_skb(__skb_dequeue(list));
			__skb_queue_tail(backlogq, bskb);
			l->backlog[msg_importance(buf_msg(bskb))].len++;
			l->stats.sent_bundled++;
			l->stats.sent_bundles++;
			continue;
		}
		l->backlog[imp].len += skb_queue_len(list);
		skb_queue_splice_tail_init(list, backlogq);
	}
	l->snd_nxt = seqno;
	return rc;
}
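
/* Each packet in tipc_link_xmit() thus takes one of three paths: while the
 * send window is open it goes straight onto the transmit queue, with a
 * clone on xmitq for the bearer; once the window is full it is bundled
 * into the last backlog packet if it fits; otherwise it is appended to the
 * backlog queue, from which tipc_link_advance_backlog() below moves it on
 * as acknowledgments reopen the window.
 */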

void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *_skb;
	struct tipc_msg *hdr;
	u16 seqno = l->snd_nxt;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	while (skb_queue_len(&l->transmq) < l->window) {
		skb = skb_peek(&l->backlogq);
		if (!skb)
			break;
		_skb = skb_clone(skb, GFP_ATOMIC);
		if (!_skb)
			break;
		__skb_dequeue(&l->backlogq);
		hdr = buf_msg(skb);
		l->backlog[msg_importance(hdr)].len--;
		__skb_queue_tail(&l->transmq, skb);
		__skb_queue_tail(xmitq, _skb);
		TIPC_SKB_CB(skb)->ackers = l->ackers;
		msg_set_seqno(hdr, seqno);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		l->rcv_unacked = 0;
		l->stats.sent_pkts++;
		seqno++;
	}
	l->snd_nxt = seqno;
}

static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
	struct tipc_msg *hdr = buf_msg(skb);

	pr_warn("Retransmission failure on link <%s>\n", l->name);
	link_print(l, "State of link ");
	pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
		msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
	pr_info("sqno %u, prev: %x, src: %x\n",
		msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}

int tipc_link_retrans(struct tipc_link *l, struct tipc_link *nacker,
		      u16 from, u16 to, struct sk_buff_head *xmitq)
{
	struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
	struct tipc_msg *hdr;
	u16 ack = l->rcv_nxt - 1;
	u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

	if (!skb)
		return 0;

	/* Detect repeated retransmit failures on same packet */
	if (nacker->last_retransm != buf_seqno(skb)) {
		nacker->last_retransm = buf_seqno(skb);
		nacker->stale_count = 1;
	} else if (++nacker->stale_count > 100) {
		link_retransmit_failure(l, skb);
		nacker->stale_count = 0;
		if (link_is_bc_sndlink(l))
			return TIPC_LINK_DOWN_EVT;
		return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
	}

	/* Move forward to where retransmission should start */
	skb_queue_walk(&l->transmq, skb) {
		if (!less(buf_seqno(skb), from))
			break;
	}

	skb_queue_walk_from(&l->transmq, skb) {
		if (more(buf_seqno(skb), to))
			break;
		hdr = buf_msg(skb);
		_skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
		if (!_skb)
			return 0;
		hdr = buf_msg(_skb);
		msg_set_ack(hdr, ack);
		msg_set_bcast_ack(hdr, bc_ack);
		_skb->priority = TC_PRIO_CONTROL;
		__skb_queue_tail(xmitq, _skb);
		l->stats.retransmitted++;
	}
	return 0;
}
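
/* Stale-retransmit detection above keys on the oldest packet in the
 * transmit queue: if more than 100 consecutive retransmit requests name
 * that same packet, the peer is evidently not absorbing it, and the link
 * (or, for the broadcast send link, the caller via TIPC_LINK_DOWN_EVT) is
 * taken down instead of retransmitting forever.
 */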

/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
			    struct sk_buff_head *inputq)
{
	struct sk_buff_head *mc_inputq = l->bc_rcvlink->inputq;
	struct tipc_msg *hdr = buf_msg(skb);

	switch (msg_user(hdr)) {
	case TIPC_LOW_IMPORTANCE:
	case TIPC_MEDIUM_IMPORTANCE:
	case TIPC_HIGH_IMPORTANCE:
	case TIPC_CRITICAL_IMPORTANCE:
		if (unlikely(msg_in_group(hdr) || msg_mcast(hdr))) {
			skb_queue_tail(mc_inputq, skb);
			return true;
		}
		/* fall through */
	case CONN_MANAGER:
		skb_queue_tail(inputq, skb);
		return true;
	case GROUP_PROTOCOL:
		skb_queue_tail(mc_inputq, skb);
		return true;
	case NAME_DISTRIBUTOR:
		l->bc_rcvlink->state = LINK_ESTABLISHED;
		skb_queue_tail(l->namedq, skb);
		return true;
	case MSG_BUNDLER:
	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
	default:
		pr_warn("Dropping received illegal msg type\n");
		kfree_skb(skb);
		return false;
	};
}

/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
			   struct sk_buff_head *inputq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	struct sk_buff **reasm_skb = &l->reasm_buf;
	struct sk_buff *iskb;
	struct sk_buff_head tmpq;
	int usr = msg_user(hdr);
	int rc = 0;
	int pos = 0;
	int ipos = 0;

	if (unlikely(usr == TUNNEL_PROTOCOL)) {
		if (msg_type(hdr) == SYNCH_MSG) {
			__skb_queue_purge(&l->deferdq);
			goto drop;
		}
		if (!tipc_msg_extract(skb, &iskb, &ipos))
			return rc;
		kfree_skb(skb);
		skb = iskb;
		hdr = buf_msg(skb);
		if (less(msg_seqno(hdr), l->drop_point))
			goto drop;
		if (tipc_data_input(l, skb, inputq))
			return rc;
		usr = msg_user(hdr);
		reasm_skb = &l->failover_reasm_skb;
	}

	if (usr == MSG_BUNDLER) {
		skb_queue_head_init(&tmpq);
		l->stats.recv_bundles++;
		l->stats.recv_bundled += msg_msgcnt(hdr);
		while (tipc_msg_extract(skb, &iskb, &pos))
			tipc_data_input(l, iskb, &tmpq);
		tipc_skb_queue_splice_tail(&tmpq, inputq);
		return 0;
	} else if (usr == MSG_FRAGMENTER) {
		l->stats.recv_fragments++;
		if (tipc_buf_append(reasm_skb, &skb)) {
			l->stats.recv_fragmented++;
			tipc_data_input(l, skb, inputq);
		} else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
			pr_warn_ratelimited("Unable to build fragment list\n");
			return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}
		return 0;
	} else if (usr == BCAST_PROTOCOL) {
		tipc_bcast_lock(l->net);
		tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
		tipc_bcast_unlock(l->net);
	}
drop:
	kfree_skb(skb);
	return 0;
}

static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
	bool released = false;
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(&l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		__skb_unlink(skb, &l->transmq);
		kfree_skb(skb);
		released = true;
	}
	return released;
}

/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	if (!l)
		return 0;

	/* Broadcast ACK must be sent via a unicast link => defer to caller */
	if (link_is_bc_rcvlink(l)) {
		if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
			return 0;
		l->rcv_unacked = 0;

		/* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
		l->snd_nxt = l->rcv_nxt;
		return TIPC_LINK_SND_STATE;
	}

	/* Unicast ACK */
	l->rcv_unacked = 0;
	l->stats.sent_acks++;
	tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
	return 0;
}
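
/* The test ((rcv_nxt ^ own_addr) & 0xf) != 0xf above implements the ack
 * coordination: a broadcast receiver volunteers an ack only when the low
 * four bits of its next expected sequence number are the bitwise
 * complement of the low four bits of its own node address, so for any
 * given position in the sequence space only about one node in sixteen
 * speaks up instead of all of them at once.
 */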

/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
	int mtyp = RESET_MSG;
	struct sk_buff *skb;

	if (l->state == LINK_ESTABLISHING)
		mtyp = ACTIVATE_MSG;

	tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, 0, xmitq);

	/* Inform peer that this endpoint is going down if applicable */
	skb = skb_peek_tail(xmitq);
	if (skb && (l->state == LINK_RESET))
		msg_set_peer_stopping(buf_msg(skb), 1);
}

/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
				    struct sk_buff_head *xmitq)
{
	u32 def_cnt = ++l->stats.deferred_recv;
	int match1, match2;

	if (link_is_bc_rcvlink(l)) {
		match1 = def_cnt & 0xf;
		match2 = tipc_own_addr(l->net) & 0xf;
		if (match1 == match2)
			return TIPC_LINK_SND_STATE;
		return 0;
	}

	if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, 0, xmitq);
	return 0;
}
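
/* Broadcast NACK suppression works like the ack coordination above, but
 * is driven by the deferred-packet counter: a receiver asks for
 * retransmission only when the low four bits of its running deferred
 * count match the low four bits of its own node address, staggering the
 * NACKs from different receivers over time.
 */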

/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @skb: TIPC packet
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
		  struct sk_buff_head *xmitq)
{
	struct sk_buff_head *defq = &l->deferdq;
	struct tipc_msg *hdr;
	u16 seqno, rcv_nxt, win_lim;
	int rc = 0;

	do {
		hdr = buf_msg(skb);
		seqno = msg_seqno(hdr);
		rcv_nxt = l->rcv_nxt;
		win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

		/* Verify and update link state */
		if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
			return tipc_link_proto_rcv(l, skb, xmitq);

		if (unlikely(!link_is_up(l))) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			goto drop;
		}

		/* Don't send probe at next timeout expiration */
		l->silent_intv_cnt = 0;

		/* Drop if outside receive window */
		if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
			l->stats.duplicates++;
			goto drop;
		}

		/* Forward queues and wake up waiting users */
		if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
			tipc_link_advance_backlog(l, xmitq);
			if (unlikely(!skb_queue_empty(&l->wakeupq)))
				link_prepare_wakeup(l);
		}

		/* Defer delivery if sequence gap */
		if (unlikely(seqno != rcv_nxt)) {
			__tipc_skb_queue_sorted(defq, seqno, skb);
			rc |= tipc_link_build_nack_msg(l, xmitq);
			break;
		}

		/* Deliver packet */
		l->rcv_nxt++;
		l->stats.recv_pkts++;
		if (!tipc_data_input(l, skb, l->inputq))
			rc |= tipc_link_input(l, skb, l->inputq);
		if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
			rc |= tipc_link_build_state_msg(l, xmitq);
		if (unlikely(rc & ~TIPC_LINK_SND_STATE))
			break;
	} while ((skb = __skb_dequeue(defq)));

	return rc;
drop:
	kfree_skb(skb);
	return rc;
}
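
/* Note on the window check above: seqno comparisons use the mod-2^16
 * helpers less()/more(), so the receive window follows rcv_nxt across
 * sequence number wraparound. For example (values illustrative): with
 * rcv_nxt = 65530, a packet with seqno = 3 is still inside the window
 * and gets deferred, while a stale seqno = 60000 is counted as a
 * duplicate and dropped.
 */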

static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
				      bool probe_reply, u16 rcvgap,
				      int tolerance, int priority,
				      struct sk_buff_head *xmitq)
{
	struct tipc_link *bcl = l->bc_rcvlink;
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff_head *dfq = &l->deferdq;
	bool node_up = link_is_up(bcl);
	struct tipc_mon_state *mstate = &l->mon_state;
	int dlen = 0;
	void *data;

	/* Don't send protocol message during reset or link failover */
	if (tipc_link_is_blocked(l))
		return;

	if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
		return;

	if (!skb_queue_empty(dfq))
		rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

	skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
			      tipc_max_domain_size, l->addr,
			      tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return;

	hdr = buf_msg(skb);
	data = msg_data(hdr);
	msg_set_session(hdr, l->session);
	msg_set_bearer_id(hdr, l->bearer_id);
	msg_set_net_plane(hdr, l->net_plane);
	msg_set_next_sent(hdr, l->snd_nxt);
	msg_set_ack(hdr, l->rcv_nxt - 1);
	msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
	msg_set_bc_ack_invalid(hdr, !node_up);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_link_tolerance(hdr, tolerance);
	msg_set_linkprio(hdr, priority);
	msg_set_redundant_link(hdr, node_up);
	msg_set_seq_gap(hdr, 0);
	msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

	if (mtyp == STATE_MSG) {
		msg_set_seq_gap(hdr, rcvgap);
		msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
		msg_set_probe(hdr, probe);
		msg_set_is_keepalive(hdr, probe || probe_reply);
		tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
		msg_set_size(hdr, INT_H_SIZE + dlen);
		skb_trim(skb, INT_H_SIZE + dlen);
		l->stats.sent_states++;
		l->rcv_unacked = 0;
	} else {
		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_max_pkt(hdr, l->advertised_mtu);
		strcpy(data, l->if_name);
		msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
		skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
	}
	if (probe)
		l->stats.sent_probes++;
	if (rcvgap)
		l->stats.sent_nacks++;
	skb->priority = TC_PRIO_CONTROL;
	__skb_queue_tail(xmitq, skb);
}
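
/* Note on the seqno chosen above: protocol messages are not sequenced on
 * the link, so they are stamped half the 16-bit sequence space ahead of
 * snd_nxt (l->snd_nxt + U16_MAX / 2). That keeps them well outside the
 * peer's receive window, so they can never be confused with, or
 * reordered against, regular data packets.
 */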

/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
			   int mtyp, struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tnlskb;
	struct tipc_msg *hdr, tnlhdr;
	struct sk_buff_head *queue = &l->transmq;
	struct sk_buff_head tmpxq, tnlq;
	u16 pktlen, pktcnt, seqno = l->snd_nxt;

	if (!tnl)
		return;

	skb_queue_head_init(&tnlq);
	skb_queue_head_init(&tmpxq);

	/* At least one packet required for safe algorithm => add dummy */
	skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
			      BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
			      0, 0, TIPC_ERR_NO_PORT);
	if (!skb) {
		pr_warn("%sunable to create tunnel packet\n", link_co_err);
		return;
	}
	skb_queue_tail(&tnlq, skb);
	tipc_link_xmit(l, &tnlq, &tmpxq);
	__skb_queue_purge(&tmpxq);

	/* Initialize reusable tunnel packet header */
	tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
		      mtyp, INT_H_SIZE, l->addr);
	pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
	msg_set_msgcnt(&tnlhdr, pktcnt);
	msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
	/* Wrap each packet into a tunnel packet */
	skb_queue_walk(queue, skb) {
		hdr = buf_msg(skb);
		if (queue == &l->backlogq)
			msg_set_seqno(hdr, seqno++);
		pktlen = msg_size(hdr);
		msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
		tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
		if (!tnlskb) {
			pr_warn("%sunable to send packet\n", link_co_err);
			return;
		}
		skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
		skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
		__skb_queue_tail(&tnlq, tnlskb);
	}
	if (queue != &l->backlogq) {
		queue = &l->backlogq;
		goto tnl;
	}

	tipc_link_xmit(tnl, &tnlq, xmitq);

	if (mtyp == FAILOVER_MSG) {
		tnl->drop_point = l->rcv_nxt;
		tnl->failover_reasm_skb = l->reasm_buf;
		l->reasm_buf = NULL;
	}
}
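
/* Sketch of the tunnel layout produced above (a reading of the code, not
 * an authoritative wire diagram): each original packet, header included,
 * becomes the payload of a TUNNEL_PROTOCOL packet with one extra header:
 *
 *   [ tunnel header, INT_H_SIZE | original header + data, pktlen ]
 *
 * The dummy TIPC_ERR_NO_PORT message guarantees at least one tunnel
 * packet, so the peer always learns msgcnt even if both queues are empty.
 */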

/* tipc_link_proto_rcv(): receive link level protocol message:
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
			       struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u16 rcvgap = 0;
	u16 ack = msg_ack(hdr);
	u16 gap = msg_seq_gap(hdr);
	u16 peers_snd_nxt = msg_next_sent(hdr);
	u16 peers_tol = msg_link_tolerance(hdr);
	u16 peers_prio = msg_linkprio(hdr);
	u16 rcv_nxt = l->rcv_nxt;
	u16 dlen = msg_data_sz(hdr);
	int mtyp = msg_type(hdr);
	bool reply = msg_probe(hdr);
	void *data;
	char *if_name;
	int rc = 0;

	if (tipc_link_is_blocked(l) || !xmitq)
		goto exit;

	if (tipc_own_addr(l->net) > msg_prevnode(hdr))
		l->net_plane = msg_net_plane(hdr);

	skb_linearize(skb);
	hdr = buf_msg(skb);
	data = msg_data(hdr);

	switch (mtyp) {
	case RESET_MSG:

		/* Ignore duplicate RESET with old session number */
		if ((less_eq(msg_session(hdr), l->peer_session)) &&
		    (l->peer_session != ANY_SESSION))
			break;
		/* fall thru' */

	case ACTIVATE_MSG:

		/* Complete own link name with peer's interface name */
		if_name = strrchr(l->name, ':') + 1;
		if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
			break;
		if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
			break;
		strncpy(if_name, data, TIPC_MAX_IF_NAME);

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own priority if peer's priority is higher */
		if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
			l->priority = peers_prio;

		/* ACTIVATE_MSG serves as PEER_RESET if link is already down */
		if (msg_peer_stopping(hdr))
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		else if ((mtyp == RESET_MSG) || !link_is_up(l))
			rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

		/* ACTIVATE_MSG takes up link if it was already locally reset */
		if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
			rc = TIPC_LINK_UP_EVT;

		l->peer_session = msg_session(hdr);
		l->peer_bearer_id = msg_bearer_id(hdr);
		if (l->mtu > msg_max_pkt(hdr))
			l->mtu = msg_max_pkt(hdr);
		break;

	case STATE_MSG:

		/* Update own tolerance if peer indicates a non-zero value */
		if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
			l->tolerance = peers_tol;

		/* Update own prio if peer indicates a different value */
		if ((peers_prio != l->priority) &&
		    in_range(peers_prio, 1, TIPC_MAX_LINK_PRI)) {
			l->priority = peers_prio;
			rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
		}

		l->silent_intv_cnt = 0;
		l->stats.recv_states++;
		if (msg_probe(hdr))
			l->stats.recv_probes++;

		if (!link_is_up(l)) {
			if (l->state == LINK_ESTABLISHING)
				rc = TIPC_LINK_UP_EVT;
			break;
		}
		tipc_mon_rcv(l->net, data, dlen, l->addr,
			     &l->mon_state, l->bearer_id);

		/* Send NACK if peer has sent pkts we haven't received yet */
		if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
			rcvgap = peers_snd_nxt - l->rcv_nxt;
		if (rcvgap || reply)
			tipc_link_build_proto_msg(l, STATE_MSG, 0, reply,
						  rcvgap, 0, 0, xmitq);
		tipc_link_release_pkts(l, ack);

		/* If NACK, retransmit will now start at right position */
		if (gap) {
			rc = tipc_link_retrans(l, l, ack + 1, ack + gap, xmitq);
			l->stats.recv_nacks++;
		}

		tipc_link_advance_backlog(l, xmitq);
		if (unlikely(!skb_queue_empty(&l->wakeupq)))
			link_prepare_wakeup(l);
	}
exit:
	kfree_skb(skb);
	return rc;
}
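
/* Worked example for the STATE_MSG branch above (values illustrative):
 * if a peer reports peers_snd_nxt = 108 while our rcv_nxt is 100, rcvgap
 * becomes 8 and a STATE message with seq_gap = 8 is queued in reply. On
 * the peer, that gap triggers tipc_link_retrans() for packets
 * ack + 1 .. ack + 8, i.e. exactly the range we have not yet seen.
 */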

/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
					 u16 peers_snd_nxt,
					 struct sk_buff_head *xmitq)
{
	struct sk_buff *skb;
	struct tipc_msg *hdr;
	struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
	u16 ack = l->rcv_nxt - 1;
	u16 gap_to = peers_snd_nxt - 1;

	skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
			      0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
	if (!skb)
		return false;
	hdr = buf_msg(skb);
	msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
	msg_set_bcast_ack(hdr, ack);
	msg_set_bcgap_after(hdr, ack);
	if (dfrd_skb)
		gap_to = buf_seqno(dfrd_skb) - 1;
	msg_set_bcgap_to(hdr, gap_to);
	msg_set_non_seq(hdr, bcast);
	__skb_queue_tail(xmitq, skb);
	return true;
}

/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
					struct sk_buff_head *xmitq)
{
	struct sk_buff_head list;

	__skb_queue_head_init(&list);
	if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
		return;
	msg_set_bc_ack_invalid(buf_msg(skb_peek(&list)), true);
	tipc_link_xmit(l, &list, xmitq);
}

/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
	int mtyp = msg_type(hdr);
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

	if (link_is_up(l))
		return;

	if (msg_user(hdr) == BCAST_PROTOCOL) {
		l->rcv_nxt = peers_snd_nxt;
		l->state = LINK_ESTABLISHED;
		return;
	}

	if (l->peer_caps & TIPC_BCAST_SYNCH)
		return;

	if (msg_peer_node_is_up(hdr))
		return;

	/* Compatibility: accept older, less safe initial synch data */
	if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
		l->rcv_nxt = peers_snd_nxt;
}

/* link_bc_retr_eval() - check if the indicated range can be retransmitted now
 * - Adjust permitted range if there is overlap with previous retransmission
 */
static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
{
	unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);

	if (less(*to, *from))
		return false;

	/* New retransmission request */
	if ((elapsed > TIPC_BC_RETR_LIMIT) ||
	    less(*to, l->prev_from) || more(*from, l->prev_to)) {
		l->prev_from = *from;
		l->prev_to = *to;
		l->prev_retr = jiffies;
		return true;
	}

	/* Inside range of previous retransmit */
	if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
		return false;

	/* Fully or partially outside previous range => exclude overlap */
	if (less(*from, l->prev_from)) {
		*to = l->prev_from - 1;
		l->prev_from = *from;
	}
	if (more(*to, l->prev_to)) {
		*from = l->prev_to + 1;
		l->prev_to = *to;
	}
	l->prev_retr = jiffies;
	return true;
}
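
/* Worked example (illustrative numbers): suppose the previous retransmit
 * within TIPC_BC_RETR_LIMIT covered [100, 120] and a new request asks for
 * [110, 130]. The overlap [110, 120] is excluded, the request is trimmed
 * to [121, 130], and prev_to advances to 130. A request falling entirely
 * inside [100, 120] is refused until the time limit expires.
 */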

/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
			  struct sk_buff_head *xmitq)
{
	struct tipc_link *snd_l = l->bc_sndlink;
	u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
	u16 from = msg_bcast_ack(hdr) + 1;
	u16 to = from + msg_bc_gap(hdr) - 1;
	int rc = 0;

	if (!link_is_up(l))
		return rc;

	if (!msg_peer_node_is_up(hdr))
		return rc;

	/* Open when peer acknowledges our bcast init msg (pkt #1) */
	if (msg_ack(hdr))
		l->bc_peer_is_up = true;

	if (!l->bc_peer_is_up)
		return rc;

	l->stats.recv_nacks++;

	/* Ignore if peers_snd_nxt goes beyond receive window */
	if (more(peers_snd_nxt, l->rcv_nxt + l->window))
		return rc;

	if (link_bc_retr_eval(snd_l, &from, &to))
		rc = tipc_link_retrans(snd_l, l, from, to, xmitq);

	l->snd_nxt = peers_snd_nxt;
	if (link_bc_rcv_gap(l))
		rc |= TIPC_LINK_SND_STATE;

	/* Return now if sender supports nack via STATE messages */
	if (l->peer_caps & TIPC_BCAST_STATE_NACK)
		return rc;

	/* Otherwise, be backwards compatible */

	if (!more(peers_snd_nxt, l->rcv_nxt)) {
		l->nack_state = BC_NACK_SND_CONDITIONAL;
		return 0;
	}

	/* Don't NACK if one was recently sent or peeked */
	if (l->nack_state == BC_NACK_SND_SUPPRESS) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		return 0;
	}

	/* Conditionally delay NACK sending until next synch rcv */
	if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
		l->nack_state = BC_NACK_SND_UNCONDITIONAL;
		if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
			return 0;
	}

	/* Send NACK now but suppress next one */
	tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
	l->nack_state = BC_NACK_SND_SUPPRESS;
	return 0;
}
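
/* Summary of the legacy nack_state machine above, as read from the code:
 * SUPPRESS silently becomes UNCONDITIONAL after one suppressed event;
 * CONDITIONAL becomes UNCONDITIONAL and only NACKs if the peer is at
 * least TIPC_MIN_LINK_WIN packets ahead; every NACK actually sent flips
 * the state back to SUPPRESS. The net effect is at most one legacy
 * BCAST_PROTOCOL NACK per two synch events from any one receiver.
 */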

void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
			  struct sk_buff_head *xmitq)
{
	struct sk_buff *skb, *tmp;
	struct tipc_link *snd_l = l->bc_sndlink;

	if (!link_is_up(l) || !l->bc_peer_is_up)
		return;

	if (!more(acked, l->acked))
		return;

	/* Skip over packets peer has already acked */
	skb_queue_walk(&snd_l->transmq, skb) {
		if (more(buf_seqno(skb), l->acked))
			break;
	}

	/* Update/release the packets peer is acking now */
	skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		if (!--TIPC_SKB_CB(skb)->ackers) {
			__skb_unlink(skb, &snd_l->transmq);
			kfree_skb(skb);
		}
	}
	l->acked = acked;
	tipc_link_advance_backlog(snd_l, xmitq);
	if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
		link_prepare_wakeup(snd_l);
}
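
/* Note on the ackers count used above (as read from this function): each
 * broadcast packet in snd_l->transmq carries a per-skb count of peers
 * that still have to ack it. An incoming ack decrements the count only
 * for the newly covered seqnos, and the skb is unlinked and freed once
 * the last outstanding peer has acked it, so a single slow receiver
 * keeps those packets pinned in the queue for everyone.
 */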

/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is here for backwards compatibility, since
 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
			  struct sk_buff_head *xmitq)
{
	struct tipc_msg *hdr = buf_msg(skb);
	u32 dnode = msg_destnode(hdr);
	int mtyp = msg_type(hdr);
	u16 acked = msg_bcast_ack(hdr);
	u16 from = acked + 1;
	u16 to = msg_bcgap_to(hdr);
	u16 peers_snd_nxt = to + 1;
	int rc = 0;

	kfree_skb(skb);

	if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
		return 0;

	if (mtyp != STATE_MSG)
		return 0;

	if (dnode == tipc_own_addr(l->net)) {
		tipc_link_bc_ack_rcv(l, acked, xmitq);
		rc = tipc_link_retrans(l->bc_sndlink, l, from, to, xmitq);
		l->stats.recv_nacks++;
		return rc;
	}

	/* Msg for other node => suppress own NACK at next sync if applicable */
	if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
		l->nack_state = BC_NACK_SND_SUPPRESS;

	return 0;
}

void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

	l->window = win;
	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
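
/* Worked example (values follow from the code above): win = 50 gives
 * backlog limits 50/100/150/200 for LOW through CRITICAL importance,
 * i.e. each level above LOW may queue proportionally more before its
 * senders see congestion. The SYSTEM limit is instead sized so that a
 * full name-table bulk distribution (TIPC_MAX_PUBLICATIONS items packed
 * ITEM_SIZE apiece into MTU-sized messages) always fits.
 */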

/**
 * tipc_link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
	memset(&l->stats, 0, sizeof(l->stats));
}

static void link_print(struct tipc_link *l, const char *str)
{
	struct sk_buff *hskb = skb_peek(&l->transmq);
	u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
	u16 tail = l->snd_nxt - 1;

	pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
	pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
		skb_queue_len(&l->transmq), head, tail,
		skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}

/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
	int err;

	err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
			       tipc_nl_prop_policy, NULL);
	if (err)
		return err;

	if (props[TIPC_NLA_PROP_PRIO]) {
		u32 prio;

		prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
		if (prio > TIPC_MAX_LINK_PRI)
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_TOL]) {
		u32 tol;

		tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
		if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
			return -EINVAL;
	}

	if (props[TIPC_NLA_PROP_WIN]) {
		u32 win;

		win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
		if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
			return -EINVAL;
	}

	return 0;
}

static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
	int i;
	struct nlattr *stats;

	struct nla_map {
		u32 key;
		u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, 0},
		{TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, 0},
		{TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
		{TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
			s->msg_length_counts : 1},
		{TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
		{TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
		{TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
		{TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
		{TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
		{TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
		{TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
		{TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
		{TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
		{TIPC_NLA_STATS_RX_STATES, s->recv_states},
		{TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
		{TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
		{TIPC_NLA_STATS_TX_STATES, s->sent_states},
		{TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
		{TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, s->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
			(s->accu_queue_sz / s->queue_sz_counts) : 0}
	};

	stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!stats)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, stats);

	return 0;
msg_full:
	nla_nest_cancel(skb, stats);

	return -EMSGSIZE;
}

/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
		       struct tipc_link *link, int nlflags)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  nlflags, TIPC_NL_LINK_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
			tipc_cluster_mask(tn->own_addr)))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->stats.recv_pkts))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->stats.sent_pkts))
		goto attr_msg_full;

	if (tipc_link_is_up(link))
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
			goto attr_msg_full;
	if (link->active)
		if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
			goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
			link->window))
		goto prop_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_stats(msg->skb, &link->stats);
	if (err)
		goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_pkts},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_pkts},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bcast_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bcast_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, 0))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, 0))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bcast_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bcast_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
			     struct sk_buff_head *xmitq)
{
	l->tolerance = tol;
	if (link_is_up(l))
		tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq);
}
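
/* Note (as read from the guard above): the tolerance change is only
 * advertised to the peer in a STATE message while the link is up;
 * tipc_link_build_proto_msg() refuses to queue a STATE message for a
 * link that is not up anyway. For a link that is down, the stored
 * value still takes effect locally.
 */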
2132
Richard Alped01332f2016-02-01 08:19:56 +01002133void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2134 struct sk_buff_head *xmitq)
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002135{
2136 l->priority = prio;
Jon Maloy8d6e79d2017-11-08 09:59:26 +01002137 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, prio, xmitq);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002138}
2139
2140void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2141{
2142 l->abort_limit = limit;
2143}