/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2000-2006, 2012-2016, Ericsson AB
 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "link.h"
#include "node.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "monitor.h"
#include "discover.h"
#include "netlink.h"

#define INVALID_NODE_SIG	0x10000

/* Flags used to take different actions according to flag type
 * TIPC_NOTIFY_NODE_DOWN: notify node is down
 * TIPC_NOTIFY_NODE_UP: notify node is up
 * TIPC_NOTIFY_LINK_UP: notify link is up
 * TIPC_NOTIFY_LINK_DOWN: notify link is down
 */
enum {
	TIPC_NOTIFY_NODE_DOWN		= (1 << 3),
	TIPC_NOTIFY_NODE_UP		= (1 << 4),
	TIPC_NOTIFY_LINK_UP		= (1 << 6),
	TIPC_NOTIFY_LINK_DOWN		= (1 << 7)
};

struct tipc_link_entry {
	struct tipc_link *link;
	spinlock_t lock;		/* per link */
	u32 mtu;
	struct sk_buff_head inputq;
	struct tipc_media_addr maddr;
};

struct tipc_bclink_entry {
	struct tipc_link *link;
	struct sk_buff_head inputq1;	/* raw broadcast input */
	struct sk_buff_head arrvq;	/* multicast arrival queue */
	struct sk_buff_head inputq2;	/* filtered multicast input */
	struct sk_buff_head namedq;	/* name table update messages */
};

/**
 * struct tipc_node - TIPC node structure
 * @addr: network address of node
 * @kref: reference counter to node object
 * @lock: rwlock governing access to structure
 * @net: the applicable net namespace
 * @hash: links to adjacent nodes in unsorted hash chain
 * @active_links: bearer ids of active links, used as index into links[] array
 * @links: array containing references to all links to node
 * @bc_entry: broadcast link entry
 * @action_flags: bit mask of different types of node actions
 * @list: links to adjacent nodes in sorted list of cluster's nodes
 * @state: connectivity state vs peer node
 * @sync_point: sequence number where synch/failover is finished
 * @link_cnt: number of links to node
 * @working_links: number of working links to node (both active and standby)
 * @capabilities: bitmap, indicating peer node's functional capabilities
 * @signature: node instance identifier
 * @link_id: local and remote bearer ids of changing link, if any
 * @publ_list: list of publications
 * @conn_sks: list of socket connections towards this node
 * @keepalive_intv: keepalive probe interval, in milliseconds
 * @timer: node keepalive timer
 * @rcu: rcu struct for tipc_node
 */
struct tipc_node {
	u32 addr;
	struct kref kref;
	rwlock_t lock;
	struct net *net;
	struct hlist_node hash;
	int active_links[2];
	struct tipc_link_entry links[MAX_BEARERS];
	struct tipc_bclink_entry bc_entry;
	int action_flags;
	struct list_head list;
	int state;
	u16 sync_point;
	int link_cnt;
	u16 working_links;
	u16 capabilities;
	u32 signature;
	u32 link_id;
	struct list_head publ_list;
	struct list_head conn_sks;
	unsigned long keepalive_intv;
	struct timer_list timer;
	struct rcu_head rcu;
};

/* Node FSM states and events:
 */
enum {
	SELF_DOWN_PEER_DOWN    = 0xdd,
	SELF_UP_PEER_UP        = 0xaa,
	SELF_DOWN_PEER_LEAVING = 0xd1,
	SELF_UP_PEER_COMING    = 0xac,
	SELF_COMING_PEER_UP    = 0xca,
	SELF_LEAVING_PEER_DOWN = 0x1d,
	NODE_FAILINGOVER       = 0xf0,
	NODE_SYNCHING          = 0xcc
};

enum {
	SELF_ESTABL_CONTACT_EVT = 0xece,
	SELF_LOST_CONTACT_EVT   = 0x1ce,
	PEER_ESTABL_CONTACT_EVT = 0x9ece,
	PEER_LOST_CONTACT_EVT   = 0x91ce,
	NODE_FAILOVER_BEGIN_EVT = 0xfbe,
	NODE_FAILOVER_END_EVT   = 0xfee,
	NODE_SYNCH_BEGIN_EVT    = 0xcbe,
	NODE_SYNCH_END_EVT      = 0xcee
};

static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr);
static void tipc_node_link_down(struct tipc_node *n, int bearer_id,
				bool delete);
static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
static void tipc_node_delete(struct tipc_node *node);
static void tipc_node_timeout(struct timer_list *t);
static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
static void tipc_node_put(struct tipc_node *node);
static bool node_is_up(struct tipc_node *n);

struct tipc_sock_conn {
	u32 port;
	u32 peer_port;
	u32 peer_node;
	struct list_head list;
};

static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
{
	int bearer_id = n->active_links[sel & 1];

	if (unlikely(bearer_id == INVALID_BEARER_ID))
		return NULL;

	return n->links[bearer_id].link;
}

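/* tipc_node_get_mtu - return MTU of the active link towards @addr for the
 * given selector, or MAX_MSG_SIZE if the node is not known
 */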
int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
{
	struct tipc_node *n;
	int bearer_id;
	unsigned int mtu = MAX_MSG_SIZE;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return mtu;

	bearer_id = n->active_links[sel & 1];
	if (likely(bearer_id != INVALID_BEARER_ID))
		mtu = n->links[bearer_id].mtu;
	tipc_node_put(n);
	return mtu;
}

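/* tipc_node_get_capabilities - return the peer node's capability bitmap,
 * or our own TIPC_NODE_CAPABILITIES if the node is not known
 */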
u16 tipc_node_get_capabilities(struct net *net, u32 addr)
{
	struct tipc_node *n;
	u16 caps;

	n = tipc_node_find(net, addr);
	if (unlikely(!n))
		return TIPC_NODE_CAPABILITIES;
	caps = n->capabilities;
	tipc_node_put(n);
	return caps;
}

static void tipc_node_kref_release(struct kref *kref)
{
	struct tipc_node *n = container_of(kref, struct tipc_node, kref);

	kfree(n->bc_entry.link);
	kfree_rcu(n, rcu);
}

static void tipc_node_put(struct tipc_node *node)
{
	kref_put(&node->kref, tipc_node_kref_release);
}

static void tipc_node_get(struct tipc_node *node)
{
	kref_get(&node->kref);
}

/*
 * tipc_node_find - locate specified node object, if it exists
 */
static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node;
	unsigned int thash = tipc_hashfn(addr);

	rcu_read_lock();
	hlist_for_each_entry_rcu(node, &tn->node_htable[thash], hash) {
		if (node->addr != addr)
			continue;
		if (!kref_get_unless_zero(&node->kref))
			node = NULL;
		break;
	}
	rcu_read_unlock();
	return node;
}

static void tipc_node_read_lock(struct tipc_node *n)
{
	read_lock_bh(&n->lock);
}

static void tipc_node_read_unlock(struct tipc_node *n)
{
	read_unlock_bh(&n->lock);
}

static void tipc_node_write_lock(struct tipc_node *n)
{
	write_lock_bh(&n->lock);
}

static void tipc_node_write_unlock_fast(struct tipc_node *n)
{
	write_unlock_bh(&n->lock);
}

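/* tipc_node_write_unlock - release the node write lock, then perform any
 * name table and monitor notifications that were flagged while it was held
 */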
static void tipc_node_write_unlock(struct tipc_node *n)
{
	struct net *net = n->net;
	u32 addr = 0;
	u32 flags = n->action_flags;
	u32 link_id = 0;
	u32 bearer_id;
	struct list_head *publ_list;

	if (likely(!flags)) {
		write_unlock_bh(&n->lock);
		return;
	}

	addr = n->addr;
	link_id = n->link_id;
	bearer_id = link_id & 0xffff;
	publ_list = &n->publ_list;

	n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
			     TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);

	write_unlock_bh(&n->lock);

	if (flags & TIPC_NOTIFY_NODE_DOWN)
		tipc_publ_notify(net, publ_list, addr);

	if (flags & TIPC_NOTIFY_NODE_UP)
		tipc_named_node_up(net, addr);

	if (flags & TIPC_NOTIFY_LINK_UP) {
		tipc_mon_peer_up(net, addr, bearer_id);
		tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
				     TIPC_NODE_SCOPE, link_id, addr);
	}
	if (flags & TIPC_NOTIFY_LINK_DOWN) {
		tipc_mon_peer_down(net, addr, bearer_id);
		tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
				      link_id, addr);
	}
}

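/* tipc_node_create - create a node object, or refresh the capability bitmap
 * of an already existing one
 */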
struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n, *temp_node;
	int i;

	spin_lock_bh(&tn->node_list_lock);
	n = tipc_node_find(net, addr);
	if (n) {
		/* Same node may come back with new capabilities */
		n->capabilities = capabilities;
		goto exit;
	}
	n = kzalloc(sizeof(*n), GFP_ATOMIC);
	if (!n) {
		pr_warn("Node creation failed, no memory\n");
		goto exit;
	}
	n->addr = addr;
	n->net = net;
	n->capabilities = capabilities;
	kref_init(&n->kref);
	rwlock_init(&n->lock);
	INIT_HLIST_NODE(&n->hash);
	INIT_LIST_HEAD(&n->list);
	INIT_LIST_HEAD(&n->publ_list);
	INIT_LIST_HEAD(&n->conn_sks);
	skb_queue_head_init(&n->bc_entry.namedq);
	skb_queue_head_init(&n->bc_entry.inputq1);
	__skb_queue_head_init(&n->bc_entry.arrvq);
	skb_queue_head_init(&n->bc_entry.inputq2);
	for (i = 0; i < MAX_BEARERS; i++)
		spin_lock_init(&n->links[i].lock);
	n->state = SELF_DOWN_PEER_LEAVING;
	n->signature = INVALID_NODE_SIG;
	n->active_links[0] = INVALID_BEARER_ID;
	n->active_links[1] = INVALID_BEARER_ID;
	if (!tipc_link_bc_create(net, tipc_own_addr(net), n->addr,
				 U16_MAX,
				 tipc_link_window(tipc_bc_sndlink(net)),
				 n->capabilities,
				 &n->bc_entry.inputq1,
				 &n->bc_entry.namedq,
				 tipc_bc_sndlink(net),
				 &n->bc_entry.link)) {
		pr_warn("Broadcast rcv link creation failed, no memory\n");
		kfree(n);
		n = NULL;
		goto exit;
	}
	tipc_node_get(n);
	timer_setup(&n->timer, tipc_node_timeout, 0);
	n->keepalive_intv = U32_MAX;
	hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
	list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
		if (n->addr < temp_node->addr)
			break;
	}
	list_add_tail_rcu(&n->list, &temp_node->list);
exit:
	spin_unlock_bh(&tn->node_list_lock);
	return n;
}

static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
{
	unsigned long tol = tipc_link_tolerance(l);
	unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;

	/* Link with lowest tolerance determines timer interval */
	if (intv < n->keepalive_intv)
		n->keepalive_intv = intv;

	/* Ensure link's abort limit corresponds to current tolerance */
	tipc_link_set_abort_limit(l, tol / n->keepalive_intv);
}

static void tipc_node_delete(struct tipc_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	tipc_node_put(node);

	del_timer_sync(&node->timer);
	tipc_node_put(node);
}

void tipc_node_stop(struct net *net)
{
	struct tipc_net *tn = tipc_net(net);
	struct tipc_node *node, *t_node;

	spin_lock_bh(&tn->node_list_lock);
	list_for_each_entry_safe(node, t_node, &tn->node_list, list)
		tipc_node_delete(node);
	spin_unlock_bh(&tn->node_list_lock);
}

void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_add_tail(subscr, &n->publ_list);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
{
	struct tipc_node *n;

	if (in_own_node(net, addr))
		return;

	n = tipc_node_find(net, addr);
	if (!n) {
		pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
		return;
	}
	tipc_node_write_lock(n);
	list_del_init(subscr);
	tipc_node_write_unlock_fast(n);
	tipc_node_put(n);
}

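/* tipc_node_add_conn - register a socket connection towards a peer node, so
 * that it can be aborted if contact with the node is lost
 */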
int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn;
	int err = 0;

	if (in_own_node(net, dnode))
		return 0;

	node = tipc_node_find(net, dnode);
	if (!node) {
		pr_warn("Connecting sock to node 0x%x failed\n", dnode);
		return -EHOSTUNREACH;
	}
	conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
	if (!conn) {
		err = -EHOSTUNREACH;
		goto exit;
	}
	conn->peer_node = dnode;
	conn->port = port;
	conn->peer_port = peer_port;

	tipc_node_write_lock(node);
	list_add_tail(&conn->list, &node->conn_sks);
	tipc_node_write_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
{
	struct tipc_node *node;
	struct tipc_sock_conn *conn, *safe;

	if (in_own_node(net, dnode))
		return;

	node = tipc_node_find(net, dnode);
	if (!node)
		return;

	tipc_node_write_lock(node);
	list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
		if (port != conn->port)
			continue;
		list_del(&conn->list);
		kfree(conn);
	}
	tipc_node_write_unlock(node);
	tipc_node_put(node);
}

/* tipc_node_timeout - handle expiration of node timer
 */
static void tipc_node_timeout(struct timer_list *t)
{
	struct tipc_node *n = from_timer(n, t, timer);
	struct tipc_link_entry *le;
	struct sk_buff_head xmitq;
	int bearer_id;
	int rc = 0;

	__skb_queue_head_init(&xmitq);

	for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
		tipc_node_read_lock(n);
		le = &n->links[bearer_id];
		spin_lock_bh(&le->lock);
		if (le->link) {
			/* Link tolerance may change asynchronously: */
			tipc_node_calculate_timer(n, le->link);
			rc = tipc_link_timeout(le->link, &xmitq);
		}
		spin_unlock_bh(&le->lock);
		tipc_node_read_unlock(n);
		tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
		if (rc & TIPC_LINK_DOWN_EVT)
			tipc_node_link_down(n, bearer_id, false);
	}
	mod_timer(&n->timer, jiffies + msecs_to_jiffies(n->keepalive_intv));
}

/**
 * __tipc_node_link_up - handle addition of link
 * Node lock must be held by caller
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
				struct sk_buff_head *xmitq)
{
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	struct tipc_link *ol = node_active_link(n, 0);
	struct tipc_link *nl = n->links[bearer_id].link;

	if (!nl || tipc_link_is_up(nl))
		return;

	tipc_link_fsm_evt(nl, LINK_ESTABLISH_EVT);
	if (!tipc_link_is_up(nl))
		return;

	n->working_links++;
	n->action_flags |= TIPC_NOTIFY_LINK_UP;
	n->link_id = tipc_link_id(nl);

	/* Leave room for tunnel header when returning 'mtu' to users: */
	n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;

	tipc_bearer_add_dest(n->net, bearer_id, n->addr);
	tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);

	pr_debug("Established link <%s> on network plane %c\n",
		 tipc_link_name(nl), tipc_link_plane(nl));

	/* Ensure that a STATE message goes first */
	tipc_link_build_state_msg(nl, xmitq);

	/* First link? => give it both slots */
	if (!ol) {
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_node_fsm_evt(n, SELF_ESTABL_CONTACT_EVT);
		n->action_flags |= TIPC_NOTIFY_NODE_UP;
		tipc_link_set_active(nl, true);
		tipc_bcast_add_peer(n->net, nl, xmitq);
		return;
	}

	/* Second link => redistribute slots */
	if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
		pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
		*slot0 = bearer_id;
		*slot1 = bearer_id;
		tipc_link_set_active(nl, true);
		tipc_link_set_active(ol, false);
	} else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
		tipc_link_set_active(nl, true);
		*slot1 = bearer_id;
	} else {
		pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
	}

	/* Prepare synchronization with first link */
	tipc_link_tnl_prepare(ol, nl, SYNCH_MSG, xmitq);
}

/**
 * tipc_node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */
static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
			      struct sk_buff_head *xmitq)
{
	struct tipc_media_addr *maddr;

	tipc_node_write_lock(n);
	__tipc_node_link_up(n, bearer_id, xmitq);
	maddr = &n->links[bearer_id].maddr;
	tipc_bearer_xmit(n->net, bearer_id, xmitq, maddr);
	tipc_node_write_unlock(n);
}

/**
 * __tipc_node_link_down - handle loss of link
 */
static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
				  struct sk_buff_head *xmitq,
				  struct tipc_media_addr **maddr)
{
	struct tipc_link_entry *le = &n->links[*bearer_id];
	int *slot0 = &n->active_links[0];
	int *slot1 = &n->active_links[1];
	int i, highest = 0, prio;
	struct tipc_link *l, *_l, *tnl;

	l = n->links[*bearer_id].link;
	if (!l || tipc_link_is_reset(l))
		return;

	n->working_links--;
	n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
	n->link_id = tipc_link_id(l);

	tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);

	pr_debug("Lost link <%s> on network plane %c\n",
		 tipc_link_name(l), tipc_link_plane(l));

	/* Select new active link if any available */
	*slot0 = INVALID_BEARER_ID;
	*slot1 = INVALID_BEARER_ID;
	for (i = 0; i < MAX_BEARERS; i++) {
		_l = n->links[i].link;
		if (!_l || !tipc_link_is_up(_l))
			continue;
		if (_l == l)
			continue;
		prio = tipc_link_prio(_l);
		if (prio < highest)
			continue;
		if (prio > highest) {
			highest = prio;
			*slot0 = i;
			*slot1 = i;
			continue;
		}
		*slot1 = i;
	}

	if (!node_is_up(n)) {
		if (tipc_link_peer_is_down(l))
			tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
		tipc_node_fsm_evt(n, SELF_LOST_CONTACT_EVT);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		tipc_link_reset(l);
		tipc_link_build_reset_msg(l, xmitq);
		*maddr = &n->links[*bearer_id].maddr;
		node_lost_contact(n, &le->inputq);
		tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
		return;
	}
	tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);

	/* There is still a working link => initiate failover */
	*bearer_id = n->active_links[0];
	tnl = n->links[*bearer_id].link;
	tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
	tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
	n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
	tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
	tipc_link_reset(l);
	tipc_link_fsm_evt(l, LINK_RESET_EVT);
	tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
	tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
	*maddr = &n->links[*bearer_id].maddr;
}

static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
{
	struct tipc_link_entry *le = &n->links[bearer_id];
	struct tipc_link *l = le->link;
	struct tipc_media_addr *maddr;
	struct sk_buff_head xmitq;
	int old_bearer_id = bearer_id;

	if (!l)
		return;

	__skb_queue_head_init(&xmitq);

	tipc_node_write_lock(n);
	if (!tipc_link_is_establishing(l)) {
		__tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
		if (delete) {
			kfree(l);
			le->link = NULL;
			n->link_cnt--;
		}
	} else {
		/* Defuse pending tipc_node_link_up() */
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
	}
	tipc_node_write_unlock(n);
	if (delete)
		tipc_mon_remove_peer(n->net, n->addr, old_bearer_id);
	tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
	tipc_sk_rcv(n->net, &le->inputq);
}

static bool node_is_up(struct tipc_node *n)
{
	return n->active_links[0] != INVALID_BEARER_ID;
}

bool tipc_node_is_up(struct net *net, u32 addr)
{
	struct tipc_node *n;
	bool retval = false;

	if (in_own_node(net, addr))
		return true;

	n = tipc_node_find(net, addr);
	if (!n)
		return false;
	retval = node_is_up(n);
	tipc_node_put(n);
	return retval;
}

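/* tipc_node_check_dest - process neighbor discovery message from a peer
 *
 * Validates the peer's claimed identity (signature and media address)
 * against our current state for that node, creating node and link objects
 * as needed. Tells the caller whether to respond to the message and whether
 * a duplicate node address was detected.
 */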
void tipc_node_check_dest(struct net *net, u32 onode,
			  struct tipc_bearer *b,
			  u16 capabilities, u32 signature,
			  struct tipc_media_addr *maddr,
			  bool *respond, bool *dupl_addr)
{
	struct tipc_node *n;
	struct tipc_link *l;
	struct tipc_link_entry *le;
	bool addr_match = false;
	bool sign_match = false;
	bool link_up = false;
	bool accept_addr = false;
	bool reset = true;
	char *if_name;
	unsigned long intv;

	*dupl_addr = false;
	*respond = false;

	n = tipc_node_create(net, onode, capabilities);
	if (!n)
		return;

	tipc_node_write_lock(n);

	le = &n->links[b->identity];

	/* Prepare to validate requesting node's signature and media address */
	l = le->link;
	link_up = l && tipc_link_is_up(l);
	addr_match = l && !memcmp(&le->maddr, maddr, sizeof(*maddr));
	sign_match = (signature == n->signature);

	/* These three flags give us eight permutations: */

	if (sign_match && addr_match && link_up) {
		/* All is fine. Do nothing. */
		reset = false;
	} else if (sign_match && addr_match && !link_up) {
		/* Respond. The link will come up in due time */
		*respond = true;
	} else if (sign_match && !addr_match && link_up) {
		/* Peer has changed i/f address without rebooting.
		 * If so, the link will reset soon, and the next
		 * discovery will be accepted. So we can ignore it.
		 * It may also be a cloned or malicious peer having
		 * chosen the same node address and signature as an
		 * existing one.
		 * Ignore requests until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (sign_match && !addr_match && !link_up) {
		/* Peer link has changed i/f address without rebooting.
		 * It may also be a cloned or malicious peer; we can't
		 * distinguish between the two.
		 * The signature is correct, so we must accept.
		 */
		accept_addr = true;
		*respond = true;
	} else if (!sign_match && addr_match && link_up) {
		/* Peer node rebooted. Two possibilities:
		 *  - Delayed re-discovery; this link endpoint has already
		 *    reset and re-established contact with the peer, before
		 *    receiving a discovery message from that node.
		 *    (The peer happened to receive one from this node first).
		 *  - The peer came back so fast that our side has not
		 *    discovered it yet. Probing from this side will soon
		 *    reset the link, since there can be no working link
		 *    endpoint at the peer end, and the link will re-establish.
		 *    Accept the signature, since it comes from a known peer.
		 */
		n->signature = signature;
	} else if (!sign_match && addr_match && !link_up) {
		/* The peer node has rebooted.
		 * Accept signature, since it is a known peer.
		 */
		n->signature = signature;
		*respond = true;
	} else if (!sign_match && !addr_match && link_up) {
		/* Peer rebooted with new address, or a new/duplicate peer.
		 * Ignore until the link goes down, if ever.
		 */
		*dupl_addr = true;
	} else if (!sign_match && !addr_match && !link_up) {
		/* Peer rebooted with new address, or it is a new peer.
		 * Accept signature and address.
		 */
		n->signature = signature;
		accept_addr = true;
		*respond = true;
	}

	if (!accept_addr)
		goto exit;

	/* Now create new link if not already existing */
	if (!l) {
		if (n->link_cnt == 2)
			goto exit;

		if_name = strchr(b->name, ':') + 1;
		if (!tipc_link_create(net, if_name, b->identity, b->tolerance,
				      b->net_plane, b->mtu, b->priority,
				      b->window, mod(tipc_net(net)->random),
				      tipc_own_addr(net), onode,
				      n->capabilities,
				      tipc_bc_sndlink(n->net), n->bc_entry.link,
				      &le->inputq,
				      &n->bc_entry.namedq, &l)) {
			*respond = false;
			goto exit;
		}
		tipc_link_reset(l);
		tipc_link_fsm_evt(l, LINK_RESET_EVT);
		if (n->state == NODE_FAILINGOVER)
			tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
		le->link = l;
		n->link_cnt++;
		tipc_node_calculate_timer(n, l);
		if (n->link_cnt == 1) {
			intv = jiffies + msecs_to_jiffies(n->keepalive_intv);
			if (!mod_timer(&n->timer, intv))
				tipc_node_get(n);
		}
	}
	memcpy(&le->maddr, maddr, sizeof(*maddr));
exit:
	tipc_node_write_unlock(n);
	if (reset && l && !tipc_link_is_reset(l))
		tipc_node_link_down(n, b->identity, false);
	tipc_node_put(n);
}

void tipc_node_delete_links(struct net *net, int bearer_id)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_node *n;

	rcu_read_lock();
	list_for_each_entry_rcu(n, &tn->node_list, list) {
		tipc_node_link_down(n, bearer_id, true);
	}
	rcu_read_unlock();
}

static void tipc_node_reset_links(struct tipc_node *n)
{
	char addr_string[16];
	int i;

	pr_warn("Resetting all links to %s\n",
		tipc_addr_string_fill(addr_string, n->addr));

	for (i = 0; i < MAX_BEARERS; i++) {
		tipc_node_link_down(n, i, false);
	}
}

/* tipc_node_fsm_evt - node finite state machine
 * Determines when contact is allowed with peer node
 */
static void tipc_node_fsm_evt(struct tipc_node *n, int evt)
{
	int state = n->state;

	switch (state) {
	case SELF_DOWN_PEER_DOWN:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_COMING;
			break;
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_COMING_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_UP:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_BEGIN_EVT:
			state = NODE_SYNCHING;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_END_EVT:
			break;
		default:
			goto illegal_evt;
		}
		break;
	case SELF_DOWN_PEER_LEAVING:
		switch (evt) {
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case SELF_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_UP_PEER_COMING:
		switch (evt) {
		case PEER_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
		case NODE_SYNCH_END_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_COMING_PEER_UP:
		switch (evt) {
		case SELF_ESTABL_CONTACT_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_LOST_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case SELF_LEAVING_PEER_DOWN:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_DOWN;
			break;
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
		case PEER_LOST_CONTACT_EVT:
			break;
		case NODE_SYNCH_END_EVT:
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_FAILOVER_BEGIN_EVT:
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_FAILINGOVER:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_FAILOVER_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case NODE_SYNCH_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	case NODE_SYNCHING:
		switch (evt) {
		case SELF_LOST_CONTACT_EVT:
			state = SELF_DOWN_PEER_LEAVING;
			break;
		case PEER_LOST_CONTACT_EVT:
			state = SELF_LEAVING_PEER_DOWN;
			break;
		case NODE_SYNCH_END_EVT:
			state = SELF_UP_PEER_UP;
			break;
		case NODE_FAILOVER_BEGIN_EVT:
			state = NODE_FAILINGOVER;
			break;
		case NODE_SYNCH_BEGIN_EVT:
		case SELF_ESTABL_CONTACT_EVT:
		case PEER_ESTABL_CONTACT_EVT:
			break;
		case NODE_FAILOVER_END_EVT:
		default:
			goto illegal_evt;
		}
		break;
	default:
		pr_err("Unknown node fsm state %x\n", state);
		break;
	}
	n->state = state;
	return;

illegal_evt:
	pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
}

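/* node_lost_contact - handle loss of all links towards a peer node
 *
 * Cleans up broadcast state, aborts any ongoing link failover, flags the
 * pending publication withdrawals, and aborts all socket connections
 * towards the node
 */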
static void node_lost_contact(struct tipc_node *n,
			      struct sk_buff_head *inputq)
{
	char addr_string[16];
	struct tipc_sock_conn *conn, *safe;
	struct tipc_link *l;
	struct list_head *conns = &n->conn_sks;
	struct sk_buff *skb;
	uint i;

	pr_debug("Lost contact with %s\n",
		 tipc_addr_string_fill(addr_string, n->addr));

	/* Clean up broadcast state */
	tipc_bcast_remove_peer(n->net, n->bc_entry.link);

	/* Abort any ongoing link failover */
	for (i = 0; i < MAX_BEARERS; i++) {
		l = n->links[i].link;
		if (l)
			tipc_link_fsm_evt(l, LINK_FAILOVER_END_EVT);
	}

	/* Notify publications from this node */
	n->action_flags |= TIPC_NOTIFY_NODE_DOWN;

	/* Notify sockets connected to node */
	list_for_each_entry_safe(conn, safe, conns, list) {
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
				      SHORT_H_SIZE, 0, tipc_own_addr(n->net),
				      conn->peer_node, conn->port,
				      conn->peer_port, TIPC_ERR_NO_NODE);
		if (likely(skb))
			skb_queue_tail(inputq, skb);
		list_del(&conn->list);
		kfree(conn);
	}
}

/**
 * tipc_node_get_linkname - get the name of a link
 *
 * @net: the applicable net namespace
 * @bearer_id: id of the bearer
 * @addr: peer node address
 * @linkname: link name output buffer
 * @len: size of the output buffer
 *
 * Returns 0 on success
 */
int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
			   char *linkname, size_t len)
{
	struct tipc_link *link;
	int err = -EINVAL;
	struct tipc_node *node = tipc_node_find(net, addr);

	if (!node)
		return err;

	if (bearer_id >= MAX_BEARERS)
		goto exit;

	tipc_node_read_lock(node);
	link = node->links[bearer_id].link;
	if (link) {
		strncpy(linkname, tipc_link_name(link), len);
		err = 0;
	}
	tipc_node_read_unlock(node);
exit:
	tipc_node_put(node);
	return err;
}

/* Caller should hold node lock for the passed node */
static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
{
	void *hdr;
	struct nlattr *attrs;

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_NODE_GET);
	if (!hdr)
		return -EMSGSIZE;

	attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
	if (!attrs)
		goto msg_full;

	if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
		goto attr_msg_full;
	if (node_is_up(node))
		if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
			goto attr_msg_full;

	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

/**
 * tipc_node_xmit() is the general link level function for message sending
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * @dnode: address of destination node
 * @selector: a number used for deterministic link selection
 * Consumes the buffer chain.
 * Returns 0 if success, otherwise: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE,-ENOBUF
 */
int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
		   u32 dnode, int selector)
{
	struct tipc_link_entry *le = NULL;
	struct tipc_node *n;
	struct sk_buff_head xmitq;
	int bearer_id;
	int rc;

	if (in_own_node(net, dnode)) {
		tipc_sk_rcv(net, list);
		return 0;
	}

	n = tipc_node_find(net, dnode);
	if (unlikely(!n)) {
		skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	tipc_node_read_lock(n);
	bearer_id = n->active_links[selector & 1];
	if (unlikely(bearer_id == INVALID_BEARER_ID)) {
		tipc_node_read_unlock(n);
		tipc_node_put(n);
		skb_queue_purge(list);
		return -EHOSTUNREACH;
	}

	__skb_queue_head_init(&xmitq);
	le = &n->links[bearer_id];
	spin_lock_bh(&le->lock);
	rc = tipc_link_xmit(le->link, list, &xmitq);
	spin_unlock_bh(&le->lock);
	tipc_node_read_unlock(n);

	if (unlikely(rc == -ENOBUFS))
		tipc_node_link_down(n, bearer_id, false);
	else
		tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);

	tipc_node_put(n);

	return rc;
}

/* tipc_node_xmit_skb(): send single buffer to destination
 * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
 * messages, which will not be rejected
 * The only exception is datagram messages rerouted after secondary
 * lookup, which are rare and safe to dispose of anyway.
 */
int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
{
	struct sk_buff_head head;

	skb_queue_head_init(&head);
	__skb_queue_tail(&head, skb);
	tipc_node_xmit(net, &head, dnode, selector);
	return 0;
}

Jon Maloyf70d37b2017-10-13 11:04:21 +02001253/* tipc_node_distr_xmit(): send single-buffer messages to individual destinations
1254 * Note: this is only for SYSTEM_IMPORTANCE messages, which cannot be rejected
1255 */
1256int tipc_node_distr_xmit(struct net *net, struct sk_buff_head *xmitq)
1257{
1258 struct sk_buff *skb;
1259 u32 selector, dnode;
1260
1261 while ((skb = __skb_dequeue(xmitq))) {
1262 selector = msg_origport(buf_msg(skb));
1263 dnode = msg_destnode(buf_msg(skb));
1264 tipc_node_xmit_skb(net, skb, dnode, selector);
1265 }
1266 return 0;
1267}
1268
Jon Paul Maloy1d7e1c22015-11-19 14:30:42 -05001269void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
1270{
1271 struct sk_buff *txskb;
1272 struct tipc_node *n;
1273 u32 dst;
1274
1275 rcu_read_lock();
1276 list_for_each_entry_rcu(n, tipc_nodes(net), list) {
1277 dst = n->addr;
1278 if (in_own_node(net, dst))
1279 continue;
Jon Maloy38077b82017-10-13 11:04:19 +02001280 if (!node_is_up(n))
Jon Paul Maloy1d7e1c22015-11-19 14:30:42 -05001281 continue;
1282 txskb = pskb_copy(skb, GFP_ATOMIC);
1283 if (!txskb)
1284 break;
1285 msg_set_destnode(buf_msg(txskb), dst);
1286 tipc_node_xmit_skb(net, txskb, dst, 0);
1287 }
1288 rcu_read_unlock();
1289
1290 kfree_skb(skb);
1291}
1292
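/* Sketch (not part of the original file) of the replicast idiom used in
 * tipc_node_broadcast() above: msg_set_destnode() writes into the TIPC
 * header, so every destination needs a private header copy. pskb_copy()
 * provides exactly that (header copied, payload shared), whereas a plain
 * skb_clone() would share the header and corrupt the per-copy rewrite.
 * my_replicate() and the 'dsts' array are hypothetical.
 */
static void my_replicate(struct net *net, struct sk_buff *skb,
			 u32 *dsts, int cnt)
{
	struct sk_buff *txskb;
	int i;

	for (i = 0; i < cnt; i++) {
		txskb = pskb_copy(skb, GFP_ATOMIC);
		if (!txskb)
			break;
		msg_set_destnode(buf_msg(txskb), dsts[i]);
		tipc_node_xmit_skb(net, txskb, dsts[i], 0);
	}
	kfree_skb(skb);		/* the original was only a template */
}
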
Jon Paul Maloya853e4c2017-01-18 13:50:52 -05001293static void tipc_node_mcast_rcv(struct tipc_node *n)
1294{
1295 struct tipc_bclink_entry *be = &n->bc_entry;
1296
1297 /* 'arrvq' is under inputq2's lock protection */
1298 spin_lock_bh(&be->inputq2.lock);
1299 spin_lock_bh(&be->inputq1.lock);
1300 skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
1301 spin_unlock_bh(&be->inputq1.lock);
1302 spin_unlock_bh(&be->inputq2.lock);
1303 tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
1304}
1305
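/* Sketch (not part of the original file) of the lock order relied on in
 * tipc_node_mcast_rcv() above: 'arrvq' is only manipulated under
 * inputq2's lock, and inputq1's lock is always taken inside it. Any
 * other path moving buffers between such queues must keep the same
 * outer-then-inner order to stay deadlock free; my_splice() is a
 * hypothetical illustration.
 */
static void my_splice(struct sk_buff_head *outer, struct sk_buff_head *inner,
		      struct sk_buff_head *dst)
{
	spin_lock_bh(&outer->lock);
	spin_lock_bh(&inner->lock);
	skb_queue_splice_tail_init(inner, dst);	/* 'dst' guarded by outer */
	spin_unlock_bh(&inner->lock);
	spin_unlock_bh(&outer->lock);
}
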
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001306static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
1307 int bearer_id, struct sk_buff_head *xmitq)
1308{
1309 struct tipc_link *ucl;
1310 int rc;
1311
1312 rc = tipc_bcast_sync_rcv(n->net, n->bc_entry.link, hdr);
1313
1314 if (rc & TIPC_LINK_DOWN_EVT) {
Jon Paul Maloy40501f92017-08-21 17:59:30 +02001315 tipc_node_reset_links(n);
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001316 return;
1317 }
1318
1319 if (!(rc & TIPC_LINK_SND_STATE))
1320 return;
1321
1322 /* If probe message, a STATE response will be sent anyway */
1323 if (msg_probe(hdr))
1324 return;
1325
1326 /* Produce a STATE message carrying broadcast NACK */
1327 tipc_node_read_lock(n);
1328 ucl = n->links[bearer_id].link;
1329 if (ucl)
1330 tipc_link_build_state_msg(ucl, xmitq);
1331 tipc_node_read_unlock(n);
1332}
1333
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001334/**
Jon Paul Maloy52666982015-10-22 08:51:41 -04001335 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1336 * @net: the applicable net namespace
1337 * @skb: TIPC packet
1338 * @bearer_id: id of bearer message arrived on
1339 *
1340 * Invoked with no locks held.
1341 */
Wu Fengguang742e0382015-10-24 22:56:01 +08001342static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id)
Jon Paul Maloy52666982015-10-22 08:51:41 -04001343{
1344 int rc;
1345 struct sk_buff_head xmitq;
1346 struct tipc_bclink_entry *be;
1347 struct tipc_link_entry *le;
1348 struct tipc_msg *hdr = buf_msg(skb);
1349 int usr = msg_user(hdr);
1350 u32 dnode = msg_destnode(hdr);
1351 struct tipc_node *n;
1352
1353 __skb_queue_head_init(&xmitq);
1354
1355 /* If NACK for other node, let rcv link for that node peek into it */
1356 if ((usr == BCAST_PROTOCOL) && (dnode != tipc_own_addr(net)))
1357 n = tipc_node_find(net, dnode);
1358 else
1359 n = tipc_node_find(net, msg_prevnode(hdr));
1360 if (!n) {
1361 kfree_skb(skb);
1362 return;
1363 }
1364 be = &n->bc_entry;
1365 le = &n->links[bearer_id];
1366
1367 rc = tipc_bcast_rcv(net, be->link, skb);
1368
Jon Paul Maloy52666982015-10-22 08:51:41 -04001369 /* Broadcast ACKs are sent on a unicast link */
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001370 if (rc & TIPC_LINK_SND_STATE) {
Jon Paul Maloy5405ff62015-11-19 14:30:44 -05001371 tipc_node_read_lock(n);
Jon Paul Maloy34b9cd62016-04-15 13:33:07 -04001372 tipc_link_build_state_msg(le->link, &xmitq);
Jon Paul Maloy5405ff62015-11-19 14:30:44 -05001373 tipc_node_read_unlock(n);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001374 }
1375
1376 if (!skb_queue_empty(&xmitq))
1377 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
1378
Jon Paul Maloya853e4c2017-01-18 13:50:52 -05001379 if (!skb_queue_empty(&be->inputq1))
1380 tipc_node_mcast_rcv(n);
Jon Paul Maloy1fc07f32016-07-11 16:08:37 -04001381
Jon Paul Maloy40501f92017-08-21 17:59:30 +02001382 /* On reassembly or retransmission failure, reset all links to peer */
1383 if (rc & TIPC_LINK_DOWN_EVT)
1384 tipc_node_reset_links(n);
Jon Paul Maloy1fc07f32016-07-11 16:08:37 -04001385
Jon Paul Maloy52666982015-10-22 08:51:41 -04001386 tipc_node_put(n);
1387}
1388
1389/**
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001390 * tipc_node_check_state - check and if necessary update node state
1391 * @skb: TIPC packet
1392 * @bearer_id: identity of bearer delivering the packet
1393 * Returns true if state is ok, otherwise consumes buffer and returns false
Jon Paul Maloy6144a992015-07-30 18:24:16 -04001394 */
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001395static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001396 int bearer_id, struct sk_buff_head *xmitq)
Jon Paul Maloy6144a992015-07-30 18:24:16 -04001397{
Jon Paul Maloy6144a992015-07-30 18:24:16 -04001398 struct tipc_msg *hdr = buf_msg(skb);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001399 int usr = msg_user(hdr);
1400 int mtyp = msg_type(hdr);
Jon Paul Maloy6144a992015-07-30 18:24:16 -04001401 u16 oseqno = msg_seqno(hdr);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001402 u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
1403 u16 exp_pkts = msg_msgcnt(hdr);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001404 u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001405 int state = n->state;
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001406 struct tipc_link *l, *tnl, *pl = NULL;
Jon Paul Maloy598411d2015-07-30 18:24:23 -04001407 struct tipc_media_addr *maddr;
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001408 int pb_id;
Jon Paul Maloy6144a992015-07-30 18:24:16 -04001409
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001410 l = n->links[bearer_id].link;
1411 if (!l)
1412 return false;
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001413 rcv_nxt = tipc_link_rcv_nxt(l);
Jon Paul Maloy6144a992015-07-30 18:24:16 -04001414
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001416 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1417 return true;
Jon Paul Maloy6144a992015-07-30 18:24:16 -04001418
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001419 /* Find parallel link, if any */
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001420 for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1421 if ((pb_id != bearer_id) && n->links[pb_id].link) {
1422 pl = n->links[pb_id].link;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001423 break;
Jon Paul Maloy6144a992015-07-30 18:24:16 -04001424 }
1425 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001426
Jon Paul Maloy5405ff62015-11-19 14:30:44 -05001427 /* Check and update node accessibility if applicable */
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001428 if (state == SELF_UP_PEER_COMING) {
1429 if (!tipc_link_is_up(l))
1430 return true;
1431 if (!msg_peer_link_is_up(hdr))
1432 return true;
1433 tipc_node_fsm_evt(n, PEER_ESTABL_CONTACT_EVT);
1434 }
1435
1436 if (state == SELF_DOWN_PEER_LEAVING) {
1437 if (msg_peer_node_is_up(hdr))
1438 return false;
1439 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
Jon Paul Maloy5c10e972015-11-19 14:30:41 -05001440 return true;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001441 }
1442
Jon Paul Maloy5405ff62015-11-19 14:30:44 -05001443 if (state == SELF_LEAVING_PEER_DOWN)
1444 return false;
1445
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001446 /* Ignore duplicate packets */
Jon Paul Maloy0f8b8e22015-10-13 12:41:51 -04001447 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001448 return true;
1449
1450 /* Initiate or update failover mode if applicable */
1451 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1452 syncpt = oseqno + exp_pkts - 1;
Jon Paul Maloy598411d2015-07-30 18:24:23 -04001453 if (pl && tipc_link_is_up(pl)) {
Jon Paul Maloy598411d2015-07-30 18:24:23 -04001454 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001455 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1456 tipc_link_inputq(l));
Jon Paul Maloy598411d2015-07-30 18:24:23 -04001457 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001458 /* If pkts arrive out of order, use lowest calculated syncpt */
1459 if (less(syncpt, n->sync_point))
1460 n->sync_point = syncpt;
1461 }
1462
1463 /* Open parallel link when tunnel link reaches synch point */
Jon Paul Maloy17b20632015-08-20 02:12:54 -04001464 if ((n->state == NODE_FAILINGOVER) && tipc_link_is_up(l)) {
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001465 if (!more(rcv_nxt, n->sync_point))
1466 return true;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001467 tipc_node_fsm_evt(n, NODE_FAILOVER_END_EVT);
1468 if (pl)
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001469 tipc_link_fsm_evt(pl, LINK_FAILOVER_END_EVT);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001470 return true;
1471 }
1472
Jon Paul Maloy5ae2f8e2015-08-20 02:12:55 -04001473 /* No synching needed if only one link */
1474 if (!pl || !tipc_link_is_up(pl))
1475 return true;
1476
Jon Paul Maloy0f8b8e22015-10-13 12:41:51 -04001477 /* Initiate synch mode if applicable */
1478 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG) && (oseqno == 1)) {
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001479 syncpt = iseqno + exp_pkts - 1;
Jon Paul Maloyed435942017-08-08 22:23:56 +02001480 if (!tipc_link_is_up(l))
Jon Paul Maloy598411d2015-07-30 18:24:23 -04001481 __tipc_node_link_up(n, bearer_id, xmitq);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001482 if (n->state == SELF_UP_PEER_UP) {
1483 n->sync_point = syncpt;
Jon Paul Maloy662921c2015-07-30 18:24:21 -04001484 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001485 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
1486 }
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001487 }
1488
1489 /* Open tunnel link when parallel link reaches synch point */
Jon Paul Maloy5c10e972015-11-19 14:30:41 -05001490 if (n->state == NODE_SYNCHING) {
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001491 if (tipc_link_is_synching(l)) {
1492 tnl = l;
1493 } else {
1494 tnl = pl;
1495 pl = l;
1496 }
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001497 inputq_len = skb_queue_len(tipc_link_inputq(pl));
1498 dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
Jon Paul Maloy5ae2f8e2015-08-20 02:12:55 -04001499 if (more(dlv_nxt, n->sync_point)) {
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001500 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001501 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001502 return true;
1503 }
Jon Paul Maloy2be80c22015-08-20 02:12:56 -04001504 if (l == pl)
1505 return true;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001506 if ((usr == TUNNEL_PROTOCOL) && (mtyp == SYNCH_MSG))
1507 return true;
1508 if (usr == LINK_PROTOCOL)
1509 return true;
1510 return false;
1511 }
1512 return true;
Jon Paul Maloy6144a992015-07-30 18:24:16 -04001513}
1514
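/* Sketch (not part of the original file) of the mod-2^16 sequence
 * arithmetic behind the syncpt handling above. With u16 wrap-around,
 * "a is before b" is decided by the sign of the 16-bit difference,
 * which is what TIPC's less()/more() helpers boil down to:
 */
static inline bool my_seq_less(u16 a, u16 b)
{
	return (s16)(a - b) < 0;	/* valid while |a - b| < 32768 */
}

/* e.g. syncpt = oseqno + exp_pkts - 1 may wrap past 0xffff and still
 * compare correctly: my_seq_less(0xfffe, 0x0003) is true.
 */
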
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001515/**
1516 * tipc_rcv - process TIPC packets/messages arriving from off-node
1517 * @net: the applicable net namespace
1518 * @skb: TIPC packet
1519 * @bearer: pointer to bearer message arrived on
1520 *
1521 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1522 * structure (i.e. cannot be NULL), but bearer can be inactive.
1523 */
1524void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
1525{
1526 struct sk_buff_head xmitq;
1527 struct tipc_node *n;
Jon Paul Maloy681a55d2017-02-23 11:10:31 -05001528 struct tipc_msg *hdr;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001529 int bearer_id = b->identity;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001530 struct tipc_link_entry *le;
Hamish Martinefe79052016-04-29 10:40:24 -04001531 u32 self = tipc_own_addr(net);
Jon Paul Maloy681a55d2017-02-23 11:10:31 -05001532 int usr, rc = 0;
1533 u16 bc_ack;
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001534
1535 __skb_queue_head_init(&xmitq);
1536
Jon Paul Maloy681a55d2017-02-23 11:10:31 -05001537 /* Ensure message is well-formed before touching the header */
Jon Maloyd618d092017-11-15 21:23:56 +01001538 if (unlikely(!tipc_msg_validate(&skb)))
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001539 goto discard;
Jon Paul Maloy681a55d2017-02-23 11:10:31 -05001540 hdr = buf_msg(skb);
1541 usr = msg_user(hdr);
1542 bc_ack = msg_bcast_ack(hdr);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001543
Jon Paul Maloy52666982015-10-22 08:51:41 -04001544 /* Handle arrival of discovery or broadcast packet */
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001545 if (unlikely(msg_non_seq(hdr))) {
Jon Paul Maloy52666982015-10-22 08:51:41 -04001546 if (unlikely(usr == LINK_CONFIG))
1547 return tipc_disc_rcv(net, skb, b);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001548 else
Jon Paul Maloy52666982015-10-22 08:51:41 -04001549 return tipc_node_bc_rcv(net, skb, bearer_id);
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001550 }
1551
Hamish Martinefe79052016-04-29 10:40:24 -04001552 /* Discard unicast link messages destined for another node */
1553 if (unlikely(!msg_short(hdr) && (msg_destnode(hdr) != self)))
1554 goto discard;
1555
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001556 /* Locate neighboring node that sent packet */
1557 n = tipc_node_find(net, msg_prevnode(hdr));
1558 if (unlikely(!n))
1559 goto discard;
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001560 le = &n->links[bearer_id];
1561
Jon Paul Maloy52666982015-10-22 08:51:41 -04001562 /* Ensure broadcast reception is in synch with peer's send state */
1563 if (unlikely(usr == LINK_PROTOCOL))
Jon Paul Maloy02d11ca2016-09-01 13:52:49 -04001564 tipc_node_bc_sync_rcv(n, hdr, bearer_id, &xmitq);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001565 else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
Jon Paul Maloy06bd2b12016-10-27 18:51:55 -04001566 tipc_bcast_ack_rcv(net, n->bc_entry.link, hdr);
Jon Paul Maloy52666982015-10-22 08:51:41 -04001567
Jon Paul Maloy5405ff62015-11-19 14:30:44 -05001568 /* Receive packet directly if conditions permit */
1569 tipc_node_read_lock(n);
1570 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
Jon Paul Maloy2312bf62015-11-19 14:30:43 -05001571 spin_lock_bh(&le->lock);
Jon Paul Maloy5405ff62015-11-19 14:30:44 -05001572 if (le->link) {
1573 rc = tipc_link_rcv(le->link, skb, &xmitq);
1574 skb = NULL;
1575 }
Jon Paul Maloy2312bf62015-11-19 14:30:43 -05001576 spin_unlock_bh(&le->lock);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001577 }
Jon Paul Maloy5405ff62015-11-19 14:30:44 -05001578 tipc_node_read_unlock(n);
1579
1580 /* Check/update node state before receiving */
1581 if (unlikely(skb)) {
Parthasarathy Bhuvaragan27163132017-08-24 16:31:22 +02001582 if (unlikely(skb_linearize(skb)))
1583 goto discard;
Jon Paul Maloy5405ff62015-11-19 14:30:44 -05001584 tipc_node_write_lock(n);
1585 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
1586 if (le->link) {
1587 rc = tipc_link_rcv(le->link, skb, &xmitq);
1588 skb = NULL;
1589 }
1590 }
1591 tipc_node_write_unlock(n);
1592 }
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001593
1594 if (unlikely(rc & TIPC_LINK_UP_EVT))
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001595 tipc_node_link_up(n, bearer_id, &xmitq);
1596
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001597 if (unlikely(rc & TIPC_LINK_DOWN_EVT))
Jon Paul Maloy598411d2015-07-30 18:24:23 -04001598 tipc_node_link_down(n, bearer_id, false);
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001599
Jon Paul Maloy52666982015-10-22 08:51:41 -04001600 if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
1601 tipc_named_rcv(net, &n->bc_entry.namedq);
Jon Paul Maloy23d83352015-07-30 18:24:24 -04001602
Jon Paul Maloya853e4c2017-01-18 13:50:52 -05001603 if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
1604 tipc_node_mcast_rcv(n);
1605
Jon Paul Maloy6e498152015-07-30 18:24:19 -04001606 if (!skb_queue_empty(&le->inputq))
1607 tipc_sk_rcv(net, &le->inputq);
1608
1609 if (!skb_queue_empty(&xmitq))
1610 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
1611
Jon Paul Maloyd9992972015-07-16 16:54:31 -04001612 tipc_node_put(n);
1613discard:
1614 kfree_skb(skb);
1615}
1616
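/* Hypothetical sketch (not part of the original file) of the calling
 * convention tipc_rcv() documents above: a media driver hands over one
 * frame at a time with no TIPC locks held, 'b' must be non-NULL, and
 * the skb is always consumed, even on the discard path.
 * MY_MEDIA_HDR_LEN and my_media_rcv() are placeholders.
 */
#define MY_MEDIA_HDR_LEN 0	/* media-specific header, if any */

static void my_media_rcv(struct net *net, struct sk_buff *skb,
			 struct tipc_bearer *b)
{
	skb_pull(skb, MY_MEDIA_HDR_LEN);
	tipc_rcv(net, skb, b);	/* no kfree_skb() needed afterwards */
}
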
Jon Maloy37c64cf2018-02-14 13:34:39 +01001617void tipc_node_apply_tolerance(struct net *net, struct tipc_bearer *b)
1618{
1619 struct tipc_net *tn = tipc_net(net);
1620 int bearer_id = b->identity;
1621 struct sk_buff_head xmitq;
1622 struct tipc_link_entry *e;
1623 struct tipc_node *n;
1624
1625 __skb_queue_head_init(&xmitq);
1626
1627 rcu_read_lock();
1628
1629 list_for_each_entry_rcu(n, &tn->node_list, list) {
1630 tipc_node_write_lock(n);
1631 e = &n->links[bearer_id];
1632 if (e->link)
1633 tipc_link_set_tolerance(e->link, b->tolerance, &xmitq);
1634 tipc_node_write_unlock(n);
1635 tipc_bearer_xmit(net, bearer_id, &xmitq, &e->maddr);
1636 }
1637
1638 rcu_read_unlock();
1639}
1640
Richard Alpeb3404022016-08-18 10:33:52 +02001641int tipc_nl_peer_rm(struct sk_buff *skb, struct genl_info *info)
1642{
1643 struct net *net = sock_net(skb->sk);
1644 struct tipc_net *tn = net_generic(net, tipc_net_id);
1645 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
1646 struct tipc_node *peer;
1647 u32 addr;
1648 int err;
1649 int i;
1650
1651 /* We identify the peer by its net address, carried in the TIPC_NLA_NET nest */
1652 if (!info->attrs[TIPC_NLA_NET])
1653 return -EINVAL;
1654
1655 err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
Johannes Bergfceb6432017-04-12 14:34:07 +02001656 info->attrs[TIPC_NLA_NET], tipc_nl_net_policy,
Johannes Bergfe521452017-04-12 14:34:08 +02001657 info->extack);
Richard Alpeb3404022016-08-18 10:33:52 +02001658 if (err)
1659 return err;
1660
1661 if (!attrs[TIPC_NLA_NET_ADDR])
1662 return -EINVAL;
1663
1664 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
1665
1666 if (in_own_node(net, addr))
1667 return -ENOTSUPP;
1668
1669 spin_lock_bh(&tn->node_list_lock);
1670 peer = tipc_node_find(net, addr);
1671 if (!peer) {
1672 spin_unlock_bh(&tn->node_list_lock);
1673 return -ENXIO;
1674 }
1675
1676 tipc_node_write_lock(peer);
1677 if (peer->state != SELF_DOWN_PEER_DOWN &&
1678 peer->state != SELF_DOWN_PEER_LEAVING) {
1679 tipc_node_write_unlock(peer);
1680 err = -EBUSY;
1681 goto err_out;
1682 }
1683
1684 for (i = 0; i < MAX_BEARERS; i++) {
1685 struct tipc_link_entry *le = &peer->links[i];
1686
1687 if (le->link) {
1688 kfree(le->link);
1689 le->link = NULL;
1690 peer->link_cnt--;
1691 }
1692 }
1693 tipc_node_write_unlock(peer);
1694 tipc_node_delete(peer);
1695
1696 err = 0;
1697err_out:
1698 tipc_node_put(peer);
1699 spin_unlock_bh(&tn->node_list_lock);
1700
1701 return err;
1702}
1703
Richard Alpe3e4b6ab2014-11-20 10:29:17 +01001704int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
1705{
1706 int err;
Ying Xuef2f98002015-01-09 15:27:05 +08001707 struct net *net = sock_net(skb->sk);
1708 struct tipc_net *tn = net_generic(net, tipc_net_id);
Richard Alpe3e4b6ab2014-11-20 10:29:17 +01001709 int done = cb->args[0];
1710 int last_addr = cb->args[1];
1711 struct tipc_node *node;
1712 struct tipc_nl_msg msg;
1713
1714 if (done)
1715 return 0;
1716
1717 msg.skb = skb;
1718 msg.portid = NETLINK_CB(cb->skb).portid;
1719 msg.seq = cb->nlh->nlmsg_seq;
1720
1721 rcu_read_lock();
Ying Xue8a0f6eb2015-03-26 18:10:24 +08001722 if (last_addr) {
1723 node = tipc_node_find(net, last_addr);
1724 if (!node) {
1725 rcu_read_unlock();
1726 /* We never set seq or call nl_dump_check_consistent(),
1727 * which means that setting prev_seq here will cause the
1728 * consistency check to fail in the netlink callback
1729 * handler, resulting in the NLMSG_DONE message having
1730 * the NLM_F_DUMP_INTR flag set if the node state
1731 * changed while we released the lock.
1732 */
1733 cb->prev_seq = 1;
1734 return -EPIPE;
1735 }
1736 tipc_node_put(node);
Richard Alpe3e4b6ab2014-11-20 10:29:17 +01001737 }
1738
Ying Xuef2f98002015-01-09 15:27:05 +08001739 list_for_each_entry_rcu(node, &tn->node_list, list) {
Richard Alpe3e4b6ab2014-11-20 10:29:17 +01001740 if (last_addr) {
1741 if (node->addr == last_addr)
1742 last_addr = 0;
1743 else
1744 continue;
1745 }
1746
Jon Paul Maloy5405ff62015-11-19 14:30:44 -05001747 tipc_node_read_lock(node);
Richard Alpe3e4b6ab2014-11-20 10:29:17 +01001748 err = __tipc_nl_add_node(&msg, node);
1749 if (err) {
1750 last_addr = node->addr;
Jon Paul Maloy5405ff62015-11-19 14:30:44 -05001751 tipc_node_read_unlock(node);
Richard Alpe3e4b6ab2014-11-20 10:29:17 +01001752 goto out;
1753 }
1754
Jon Paul Maloy5405ff62015-11-19 14:30:44 -05001755 tipc_node_read_unlock(node);
Richard Alpe3e4b6ab2014-11-20 10:29:17 +01001756 }
1757 done = 1;
1758out:
1759 cb->args[0] = done;
1760 cb->args[1] = last_addr;
1761 rcu_read_unlock();
1762
1763 return skb->len;
1764}
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001765
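/* Sketch (not part of the original file) of the resumable-dump protocol
 * used by tipc_nl_node_dump() above: netlink may invoke the handler
 * several times for one request, so all iteration state must live in
 * cb->args[] between calls. my_dump() shows the hypothetical skeleton.
 */
static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int done = cb->args[0];
	u32 last_key = cb->args[1];

	if (done)
		return 0;		/* nothing left: terminates the dump */

	/* fill 'skb' starting after 'last_key'; if it fills up, leave
	 * 'last_key' at the first item not yet emitted and keep done == 0
	 */
	done = 1;			/* here: assume the walk completed */

	cb->args[0] = done;
	cb->args[1] = last_key;
	return skb->len;		/* non-zero: netlink calls us again */
}
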
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001766/* tipc_node_find_by_name - locate owner node of link by link's name
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001767 * @net: the applicable net namespace
1768 * @name: pointer to link name string
1769 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1770 *
1771 * Returns pointer to node owning the link, or NULL if no matching link is found.
1772 */
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001773static struct tipc_node *tipc_node_find_by_name(struct net *net,
1774 const char *link_name,
1775 unsigned int *bearer_id)
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001776{
1777 struct tipc_net *tn = net_generic(net, tipc_net_id);
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001778 struct tipc_link *l;
1779 struct tipc_node *n;
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001780 struct tipc_node *found_node = NULL;
1781 int i;
1782
1783 *bearer_id = 0;
1784 rcu_read_lock();
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001785 list_for_each_entry_rcu(n, &tn->node_list, list) {
1786 tipc_node_read_lock(n);
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001787 for (i = 0; i < MAX_BEARERS; i++) {
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001788 l = n->links[i].link;
1789 if (l && !strcmp(tipc_link_name(l), link_name)) {
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001790 *bearer_id = i;
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001791 found_node = n;
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001792 break;
1793 }
1794 }
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001795 tipc_node_read_unlock(n);
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001796 if (found_node)
1797 break;
1798 }
1799 rcu_read_unlock();
1800
1801 return found_node;
1802}
1803
1804int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
1805{
1806 int err;
1807 int res = 0;
1808 int bearer_id;
1809 char *name;
1810 struct tipc_link *link;
1811 struct tipc_node *node;
Richard Alped01332f2016-02-01 08:19:56 +01001812 struct sk_buff_head xmitq;
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001813 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1814 struct net *net = sock_net(skb->sk);
1815
Richard Alped01332f2016-02-01 08:19:56 +01001816 __skb_queue_head_init(&xmitq);
1817
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001818 if (!info->attrs[TIPC_NLA_LINK])
1819 return -EINVAL;
1820
1821 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1822 info->attrs[TIPC_NLA_LINK],
Johannes Bergfe521452017-04-12 14:34:08 +02001823 tipc_nl_link_policy, info->extack);
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001824 if (err)
1825 return err;
1826
1827 if (!attrs[TIPC_NLA_LINK_NAME])
1828 return -EINVAL;
1829
1830 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1831
1832 if (strcmp(name, tipc_bclink_name) == 0)
1833 return tipc_nl_bc_link_set(net, attrs);
1834
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001835 node = tipc_node_find_by_name(net, name, &bearer_id);
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001836 if (!node)
1837 return -EINVAL;
1838
1839 tipc_node_read_lock(node);
1840
1841 link = node->links[bearer_id].link;
1842 if (!link) {
1843 res = -EINVAL;
1844 goto out;
1845 }
1846
1847 if (attrs[TIPC_NLA_LINK_PROP]) {
1848 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1849
1850 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
1851 props);
1852 if (err) {
1853 res = err;
1854 goto out;
1855 }
1856
1857 if (props[TIPC_NLA_PROP_TOL]) {
1858 u32 tol;
1859
1860 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
Richard Alped01332f2016-02-01 08:19:56 +01001861 tipc_link_set_tolerance(link, tol, &xmitq);
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001862 }
1863 if (props[TIPC_NLA_PROP_PRIO]) {
1864 u32 prio;
1865
1866 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
Richard Alped01332f2016-02-01 08:19:56 +01001867 tipc_link_set_prio(link, prio, &xmitq);
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001868 }
1869 if (props[TIPC_NLA_PROP_WIN]) {
1870 u32 win;
1871
1872 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1873 tipc_link_set_queue_limits(link, win);
1874 }
1875 }
1876
1877out:
1878 tipc_node_read_unlock(node);
Richard Alped01332f2016-02-01 08:19:56 +01001879 tipc_bearer_xmit(net, bearer_id, &xmitq, &node->links[bearer_id].maddr);
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001880 return res;
1881}
1882
1883int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
1884{
1885 struct net *net = genl_info_net(info);
1886 struct tipc_nl_msg msg;
1887 char *name;
1888 int err;
1889
1890 msg.portid = info->snd_portid;
1891 msg.seq = info->snd_seq;
1892
1893 if (!info->attrs[TIPC_NLA_LINK_NAME])
1894 return -EINVAL;
1895 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
1896
1897 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1898 if (!msg.skb)
1899 return -ENOMEM;
1900
1901 if (strcmp(name, tipc_bclink_name) == 0) {
1902 err = tipc_nl_add_bc_link(net, &msg);
Cong Wang59b36612018-01-10 12:50:25 -08001903 if (err)
1904 goto err_free;
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001905 } else {
1906 int bearer_id;
1907 struct tipc_node *node;
1908 struct tipc_link *link;
1909
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001910 node = tipc_node_find_by_name(net, name, &bearer_id);
Cong Wang59b36612018-01-10 12:50:25 -08001911 if (!node) {
1912 err = -EINVAL;
1913 goto err_free;
1914 }
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001915
1916 tipc_node_read_lock(node);
1917 link = node->links[bearer_id].link;
1918 if (!link) {
1919 tipc_node_read_unlock(node);
Cong Wang59b36612018-01-10 12:50:25 -08001920 err = -EINVAL;
1921 goto err_free;
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001922 }
1923
1924 err = __tipc_nl_add_link(net, &msg, link, 0);
1925 tipc_node_read_unlock(node);
Cong Wang59b36612018-01-10 12:50:25 -08001926 if (err)
1927 goto err_free;
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001928 }
1929
1930 return genlmsg_reply(msg.skb, info);
Cong Wang59b36612018-01-10 12:50:25 -08001931
1932err_free:
1933 nlmsg_free(msg.skb);
1934 return err;
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001935}
1936
1937int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
1938{
1939 int err;
1940 char *link_name;
1941 unsigned int bearer_id;
1942 struct tipc_link *link;
1943 struct tipc_node *node;
1944 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1945 struct net *net = sock_net(skb->sk);
1946 struct tipc_link_entry *le;
1947
1948 if (!info->attrs[TIPC_NLA_LINK])
1949 return -EINVAL;
1950
1951 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1952 info->attrs[TIPC_NLA_LINK],
Johannes Bergfe521452017-04-12 14:34:08 +02001953 tipc_nl_link_policy, info->extack);
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001954 if (err)
1955 return err;
1956
1957 if (!attrs[TIPC_NLA_LINK_NAME])
1958 return -EINVAL;
1959
1960 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1961
1962 if (strcmp(link_name, tipc_bclink_name) == 0) {
1963 err = tipc_bclink_reset_stats(net);
1964 if (err)
1965 return err;
1966 return 0;
1967 }
1968
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001969 node = tipc_node_find_by_name(net, link_name, &bearer_id);
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001970 if (!node)
1971 return -EINVAL;
1972
1973 le = &node->links[bearer_id];
1974 tipc_node_read_lock(node);
1975 spin_lock_bh(&le->lock);
1976 link = node->links[bearer_id].link;
1977 if (!link) {
1978 spin_unlock_bh(&le->lock);
1979 tipc_node_read_unlock(node);
1980 return -EINVAL;
1981 }
Jon Paul Maloy38206d52015-11-19 14:30:46 -05001982 tipc_link_reset_stats(link);
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05001983 spin_unlock_bh(&le->lock);
1984 tipc_node_read_unlock(node);
1985 return 0;
1986}
1987
1988/* Caller should hold node lock */
1989static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
1990 struct tipc_node *node, u32 *prev_link)
1991{
1992 u32 i;
1993 int err;
1994
1995 for (i = *prev_link; i < MAX_BEARERS; i++) {
1996 *prev_link = i;
1997
1998 if (!node->links[i].link)
1999 continue;
2000
2001 err = __tipc_nl_add_link(net, msg,
2002 node->links[i].link, NLM_F_MULTI);
2003 if (err)
2004 return err;
2005 }
2006 *prev_link = 0;
2007
2008 return 0;
2009}
2010
Jon Paul Maloy38206d52015-11-19 14:30:46 -05002011int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
Jon Paul Maloy5be9c082015-11-19 14:30:45 -05002012{
2013 struct net *net = sock_net(skb->sk);
2014 struct tipc_net *tn = net_generic(net, tipc_net_id);
2015 struct tipc_node *node;
2016 struct tipc_nl_msg msg;
2017 u32 prev_node = cb->args[0];
2018 u32 prev_link = cb->args[1];
2019 int done = cb->args[2];
2020 int err;
2021
2022 if (done)
2023 return 0;
2024
2025 msg.skb = skb;
2026 msg.portid = NETLINK_CB(cb->skb).portid;
2027 msg.seq = cb->nlh->nlmsg_seq;
2028
2029 rcu_read_lock();
2030 if (prev_node) {
2031 node = tipc_node_find(net, prev_node);
2032 if (!node) {
2033 /* We never set seq or call nl_dump_check_consistent(),
2034 * which means that setting prev_seq here will cause the
2035 * consistency check to fail in the netlink callback
2036 * handler, resulting in the last NLMSG_DONE message
2037 * having the NLM_F_DUMP_INTR flag set.
2038 */
2039 cb->prev_seq = 1;
2040 goto out;
2041 }
2042 tipc_node_put(node);
2043
2044 list_for_each_entry_continue_rcu(node, &tn->node_list,
2045 list) {
2046 tipc_node_read_lock(node);
2047 err = __tipc_nl_add_node_links(net, &msg, node,
2048 &prev_link);
2049 tipc_node_read_unlock(node);
2050 if (err)
2051 goto out;
2052
2053 prev_node = node->addr;
2054 }
2055 } else {
2056 err = tipc_nl_add_bc_link(net, &msg);
2057 if (err)
2058 goto out;
2059
2060 list_for_each_entry_rcu(node, &tn->node_list, list) {
2061 tipc_node_read_lock(node);
2062 err = __tipc_nl_add_node_links(net, &msg, node,
2063 &prev_link);
2064 tipc_node_read_unlock(node);
2065 if (err)
2066 goto out;
2067
2068 prev_node = node->addr;
2069 }
2070 }
2071 done = 1;
2072out:
2073 rcu_read_unlock();
2074
2075 cb->args[0] = prev_node;
2076 cb->args[1] = prev_link;
2077 cb->args[2] = done;
2078
2079 return skb->len;
2080}
Parthasarathy Bhuvaragan7b3f5222016-07-26 08:47:19 +02002081
2082int tipc_nl_node_set_monitor(struct sk_buff *skb, struct genl_info *info)
2083{
2084 struct nlattr *attrs[TIPC_NLA_MON_MAX + 1];
2085 struct net *net = sock_net(skb->sk);
2086 int err;
2087
2088 if (!info->attrs[TIPC_NLA_MON])
2089 return -EINVAL;
2090
2091 err = nla_parse_nested(attrs, TIPC_NLA_MON_MAX,
2092 info->attrs[TIPC_NLA_MON],
Johannes Bergfe521452017-04-12 14:34:08 +02002093 tipc_nl_monitor_policy, info->extack);
Parthasarathy Bhuvaragan7b3f5222016-07-26 08:47:19 +02002094 if (err)
2095 return err;
2096
2097 if (attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]) {
2098 u32 val;
2099
2100 val = nla_get_u32(attrs[TIPC_NLA_MON_ACTIVATION_THRESHOLD]);
2101 err = tipc_nl_monitor_set_threshold(net, val);
2102 if (err)
2103 return err;
2104 }
2105
2106 return 0;
2107}
Parthasarathy Bhuvaraganbf1035b2016-07-26 08:47:20 +02002108
2109static int __tipc_nl_add_monitor_prop(struct net *net, struct tipc_nl_msg *msg)
2110{
2111 struct nlattr *attrs;
2112 void *hdr;
2113 u32 val;
2114
2115 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2116 0, TIPC_NL_MON_GET);
2117 if (!hdr)
2118 return -EMSGSIZE;
2119
2120 attrs = nla_nest_start(msg->skb, TIPC_NLA_MON);
2121 if (!attrs)
2122 goto msg_full;
2123
2124 val = tipc_nl_monitor_get_threshold(net);
2125
2126 if (nla_put_u32(msg->skb, TIPC_NLA_MON_ACTIVATION_THRESHOLD, val))
2127 goto attr_msg_full;
2128
2129 nla_nest_end(msg->skb, attrs);
2130 genlmsg_end(msg->skb, hdr);
2131
2132 return 0;
2133
2134attr_msg_full:
2135 nla_nest_cancel(msg->skb, attrs);
2136msg_full:
2137 genlmsg_cancel(msg->skb, hdr);
2138
2139 return -EMSGSIZE;
2140}
2141
2142int tipc_nl_node_get_monitor(struct sk_buff *skb, struct genl_info *info)
2143{
2144 struct net *net = sock_net(skb->sk);
2145 struct tipc_nl_msg msg;
2146 int err;
2147
2148 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
Pan Bian78302fd2017-04-23 15:09:19 +08002149 if (!msg.skb)
2150 return -ENOMEM;
Parthasarathy Bhuvaraganbf1035b2016-07-26 08:47:20 +02002151 msg.portid = info->snd_portid;
2152 msg.seq = info->snd_seq;
2153
2154 err = __tipc_nl_add_monitor_prop(net, &msg);
2155 if (err) {
2156 nlmsg_free(msg.skb);
2157 return err;
2158 }
2159
2160 return genlmsg_reply(msg.skb, info);
2161}
Parthasarathy Bhuvaragancf6f7e12016-07-26 08:47:22 +02002162
2163int tipc_nl_node_dump_monitor(struct sk_buff *skb, struct netlink_callback *cb)
2164{
2165 struct net *net = sock_net(skb->sk);
2166 u32 prev_bearer = cb->args[0];
2167 struct tipc_nl_msg msg;
2168 int err;
2169 int i;
2170
2171 if (prev_bearer == MAX_BEARERS)
2172 return 0;
2173
2174 msg.skb = skb;
2175 msg.portid = NETLINK_CB(cb->skb).portid;
2176 msg.seq = cb->nlh->nlmsg_seq;
2177
2178 rtnl_lock();
2179 for (i = prev_bearer; i < MAX_BEARERS; i++) {
2180 prev_bearer = i;
2181 err = __tipc_nl_add_monitor(net, &msg, prev_bearer);
2182 if (err)
2183 goto out;
2184 }
2185
2186out:
2187 rtnl_unlock();
2188 cb->args[0] = prev_bearer;
2189
2190 return skb->len;
2191}
2192
2193int tipc_nl_node_dump_monitor_peer(struct sk_buff *skb,
2194 struct netlink_callback *cb)
2195{
2196 struct net *net = sock_net(skb->sk);
2197 u32 prev_node = cb->args[1];
2198 u32 bearer_id = cb->args[2];
2199 int done = cb->args[0];
2200 struct tipc_nl_msg msg;
2201 int err;
2202
2203 if (!prev_node) {
2204 struct nlattr **attrs;
2205 struct nlattr *mon[TIPC_NLA_MON_MAX + 1];
2206
2207 err = tipc_nlmsg_parse(cb->nlh, &attrs);
2208 if (err)
2209 return err;
2210
2211 if (!attrs[TIPC_NLA_MON])
2212 return -EINVAL;
2213
2214 err = nla_parse_nested(mon, TIPC_NLA_MON_MAX,
2215 attrs[TIPC_NLA_MON],
Johannes Bergfceb6432017-04-12 14:34:07 +02002216 tipc_nl_monitor_policy, NULL);
Parthasarathy Bhuvaragancf6f7e12016-07-26 08:47:22 +02002217 if (err)
2218 return err;
2219
2220 if (!mon[TIPC_NLA_MON_REF])
2221 return -EINVAL;
2222
2223 bearer_id = nla_get_u32(mon[TIPC_NLA_MON_REF]);
2224
2225 if (bearer_id >= MAX_BEARERS)
2226 return -EINVAL;
2227 }
2228
2229 if (done)
2230 return 0;
2231
2232 msg.skb = skb;
2233 msg.portid = NETLINK_CB(cb->skb).portid;
2234 msg.seq = cb->nlh->nlmsg_seq;
2235
2236 rtnl_lock();
2237 err = tipc_nl_add_monitor_peer(net, &msg, bearer_id, &prev_node);
2238 if (!err)
2239 done = 1;
2240
2241 rtnl_unlock();
2242 cb->args[0] = done;
2243 cb->args[1] = prev_node;
2244 cb->args[2] = bearer_id;
2245
2246 return skb->len;
2247}
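
/* Note (not part of the original file): netlink zeroes cb->args[] for a
 * fresh dump, so the zero 'prev_node' above doubles as a first-call
 * flag. The request attributes are therefore parsed exactly once, and
 * later invocations reuse the bearer id cached in cb->args[2] instead
 * of re-parsing the message.
 */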