/*
 * net/tipc/socket.c: TIPC socket API
 *
 * Copyright (c) 2001-2007, 2012-2019, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
 * Copyright (c) 2020-2021, Red Hat Inc
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/rhashtable.h>
#include <linux/sched/signal.h>

#include "core.h"
#include "name_table.h"
#include "node.h"
#include "link.h"
#include "name_distr.h"
#include "socket.h"
#include "bcast.h"
#include "netlink.h"
#include "group.h"
#include "trace.h"

#define NAGLE_START_INIT	4
#define NAGLE_START_MAX		1024
#define CONN_TIMEOUT_DEFAULT	8000	/* default connect timeout = 8s */
#define CONN_PROBING_INTV	msecs_to_jiffies(3600000)  /* [ms] => 1 h */
#define TIPC_MAX_PORT		0xffffffff
#define TIPC_MIN_PORT		1
#define TIPC_ACK_RATE		4       /* ACK at 1/4 of rcv window size */

enum {
	TIPC_LISTEN = TCP_LISTEN,
	TIPC_ESTABLISHED = TCP_ESTABLISHED,
	TIPC_OPEN = TCP_CLOSE,
	TIPC_DISCONNECTING = TCP_CLOSE_WAIT,
	TIPC_CONNECTING = TCP_SYN_SENT,
};

struct sockaddr_pair {
	struct sockaddr_tipc sock;
	struct sockaddr_tipc member;
};

/**
 * struct tipc_sock - TIPC socket structure
 * @sk: socket - interacts with 'port' and with user via the socket API
 * @conn_type: TIPC type used when connection was established
 * @conn_instance: TIPC instance used when connection was established
 * @published: non-zero if port has one or more associated names
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @maxnagle: maximum size of msg which can be subject to nagle
 * @portid: unique port identity in TIPC socket hash table
 * @phdr: preformatted message header used when sending messages
 * @cong_links: list of congested links
 * @publications: list of publications for port
 * @blocking_link: address of the congested link we are currently sleeping on
 * @pub_count: total # of publications port has made during its lifetime
 * @conn_timeout: the time we can wait for an unresponded setup request
 * @probe_unacked: probe has not received ack yet
 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
 * @cong_link_cnt: number of congested links
 * @snt_unacked: # messages sent by socket, and not yet acked by peer
 * @snd_win: send window size
 * @peer_caps: peer capabilities mask
 * @rcv_unacked: # messages read by user, but not yet acked back to peer
 * @rcv_win: receive window size
 * @peer: 'connected' peer for dgram/rdm
 * @node: hash table node
 * @mc_method: cookie for use between socket and broadcast layer
 * @rcu: rcu struct for tipc_sock
 * @group: TIPC communications group
 * @oneway: message count in one direction (FIXME)
 * @nagle_start: current nagle value
 * @snd_backlog: send backlog count
 * @msg_acc: messages accepted; used in managing backlog and nagle
 * @pkt_cnt: TIPC socket packet count
 * @expect_ack: whether this TIPC socket is expecting an ack
 * @nodelay: setsockopt() TIPC_NODELAY setting
 * @group_is_open: TIPC socket group is fully open (FIXME)
 */
struct tipc_sock {
	struct sock sk;
	u32 conn_type;
	u32 conn_instance;
	int published;
	u32 max_pkt;
	u32 maxnagle;
	u32 portid;
	struct tipc_msg phdr;
	struct list_head cong_links;
	struct list_head publications;
	u32 pub_count;
	atomic_t dupl_rcvcnt;
	u16 conn_timeout;
	bool probe_unacked;
	u16 cong_link_cnt;
	u16 snt_unacked;
	u16 snd_win;
	u16 peer_caps;
	u16 rcv_unacked;
	u16 rcv_win;
	struct sockaddr_tipc peer;
	struct rhash_head node;
	struct tipc_mc_method mc_method;
	struct rcu_head rcu;
	struct tipc_group *group;
	u32 oneway;
	u32 nagle_start;
	u16 snd_backlog;
	u16 msg_acc;
	u16 pkt_cnt;
	bool expect_ack;
	bool nodelay;
	bool group_is_open;
};

static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb);
static void tipc_data_ready(struct sock *sk);
static void tipc_write_space(struct sock *sk);
static void tipc_sock_destruct(struct sock *sk);
static int tipc_release(struct socket *sock);
static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
		       bool kern);
static void tipc_sk_timeout(struct timer_list *t);
static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
			   struct tipc_service_range const *seq);
static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
			    struct tipc_service_range const *seq);
static int tipc_sk_leave(struct tipc_sock *tsk);
static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
static int tipc_sk_insert(struct tipc_sock *tsk);
static void tipc_sk_remove(struct tipc_sock *tsk);
static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz);
static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dsz);
static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack);

static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static const struct rhashtable_params tsk_rht_params;

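/* The accessors below read and update addressing/QoS fields that are cached
 * in the socket's pre-built message header (tsk->phdr).
 */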
static u32 tsk_own_node(struct tipc_sock *tsk)
{
	return msg_prevnode(&tsk->phdr);
}

static u32 tsk_peer_node(struct tipc_sock *tsk)
{
	return msg_destnode(&tsk->phdr);
}

static u32 tsk_peer_port(struct tipc_sock *tsk)
{
	return msg_destport(&tsk->phdr);
}

static bool tsk_unreliable(struct tipc_sock *tsk)
{
	return msg_src_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
{
	msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
}

static bool tsk_unreturnable(struct tipc_sock *tsk)
{
	return msg_dest_droppable(&tsk->phdr) != 0;
}

static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
{
	msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
}

static int tsk_importance(struct tipc_sock *tsk)
{
	return msg_importance(&tsk->phdr);
}

static struct tipc_sock *tipc_sk(const struct sock *sk)
{
	return container_of(sk, struct tipc_sock, sk);
}

int tsk_set_importance(struct sock *sk, int imp)
{
	if (imp > TIPC_CRITICAL_IMPORTANCE)
		return -EINVAL;
	msg_set_importance(&tipc_sk(sk)->phdr, (u32)imp);
	return 0;
}

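/* tsk_conn_cong(): true if the connection's send window is exhausted,
 * i.e. more messages are outstanding than the peer has advertised
 */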
static bool tsk_conn_cong(struct tipc_sock *tsk)
{
	return tsk->snt_unacked > tsk->snd_win;
}

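/* tsk_blocks(): number of advertised flow control blocks needed to cover
 * 'len' bytes of message data
 */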
static u16 tsk_blocks(int len)
{
	return ((len / FLOWCTL_BLK_SZ) + 1);
}

/* tsk_adv_blocks(): translate a buffer size in bytes to number of
 * advertisable blocks, taking into account the ratio truesize(len)/len
 * We can trust that this ratio is always < 4 for len >= FLOWCTL_BLK_SZ
 */
static u16 tsk_adv_blocks(int len)
{
	return len / FLOWCTL_BLK_SZ / 4;
}

/* tsk_inc(): increment counter for sent or received data
 * - If block based flow control is not supported by peer we
 *   fall back to message based ditto, incrementing the counter
 */
static u16 tsk_inc(struct tipc_sock *tsk, int msglen)
{
	if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
		return ((msglen / FLOWCTL_BLK_SZ) + 1);
	return 1;
}

/* tsk_set_nagle - enable/disable nagle property by manipulating maxnagle
 */
static void tsk_set_nagle(struct tipc_sock *tsk)
{
	struct sock *sk = &tsk->sk;

	tsk->maxnagle = 0;
	if (sk->sk_type != SOCK_STREAM)
		return;
	if (tsk->nodelay)
		return;
	if (!(tsk->peer_caps & TIPC_NAGLE))
		return;
	/* Limit node local buffer size to avoid receive queue overflow */
	if (tsk->max_pkt == MAX_MSG_SIZE)
		tsk->maxnagle = 1500;
	else
		tsk->maxnagle = tsk->max_pkt;
}

/**
 * tsk_advance_rx_queue - discard first buffer in socket receive queue
 * @sk: network socket
 *
 * Caller must hold socket lock
 */
static void tsk_advance_rx_queue(struct sock *sk)
{
	trace_tipc_sk_advance_rx(sk, NULL, TIPC_DUMP_SK_RCVQ, " ");
	kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
}

/* tipc_sk_respond() : send response message back to sender
 */
static void tipc_sk_respond(struct sock *sk, struct sk_buff *skb, int err)
{
	u32 selector;
	u32 dnode;
	u32 onode = tipc_own_addr(sock_net(sk));

	if (!tipc_msg_reverse(onode, &skb, err))
		return;

	trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE, "@sk_respond!");
	dnode = msg_destnode(buf_msg(skb));
	selector = msg_origport(buf_msg(skb));
	tipc_node_xmit_skb(sock_net(sk), skb, dnode, selector);
}

/**
 * tsk_rej_rx_queue - reject all buffers in socket receive queue
 * @sk: network socket
 * @error: response error code
 *
 * Caller must hold socket lock
 */
static void tsk_rej_rx_queue(struct sock *sk, int error)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_receive_queue)))
		tipc_sk_respond(sk, skb, error);
}

static bool tipc_sk_connected(struct sock *sk)
{
	return sk->sk_state == TIPC_ESTABLISHED;
}

/* tipc_sk_type_connectionless - check if the socket is datagram socket
 * @sk: socket
 *
 * Returns true if connection less, false otherwise
 */
static bool tipc_sk_type_connectionless(struct sock *sk)
{
	return sk->sk_type == SOCK_RDM || sk->sk_type == SOCK_DGRAM;
}

/* tsk_peer_msg - verify if message was sent by connected port's peer
 *
 * Handles cases where the node's network address has changed from
 * the default of <0.0.0> to its configured setting.
 */
static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
{
	struct sock *sk = &tsk->sk;
	u32 self = tipc_own_addr(sock_net(sk));
	u32 peer_port = tsk_peer_port(tsk);
	u32 orig_node, peer_node;

	if (unlikely(!tipc_sk_connected(sk)))
		return false;

	if (unlikely(msg_origport(msg) != peer_port))
		return false;

	orig_node = msg_orignode(msg);
	peer_node = tsk_peer_node(tsk);

	if (likely(orig_node == peer_node))
		return true;

	if (!orig_node && peer_node == self)
		return true;

	if (!peer_node && orig_node == self)
		return true;

	return false;
}

/* tipc_set_sk_state - set the sk_state of the socket
 * @sk: socket
 *
 * Caller must hold socket lock
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_set_sk_state(struct sock *sk, int state)
{
	int oldsk_state = sk->sk_state;
	int res = -EINVAL;

	switch (state) {
	case TIPC_OPEN:
		res = 0;
		break;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_ESTABLISHED:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_OPEN)
			res = 0;
		break;
	case TIPC_DISCONNECTING:
		if (oldsk_state == TIPC_CONNECTING ||
		    oldsk_state == TIPC_ESTABLISHED)
			res = 0;
		break;
	}

	if (!res)
		sk->sk_state = state;

	return res;
}

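/* tipc_sk_sock_err(): return a pending socket error, a connection state
 * error for connection oriented sockets, or the errno matching an expired
 * timeout or pending signal; 0 means the caller may keep waiting
 */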
static int tipc_sk_sock_err(struct socket *sock, long *timeout)
{
	struct sock *sk = sock->sk;
	int err = sock_error(sk);
	int typ = sock->type;

	if (err)
		return err;
	if (typ == SOCK_STREAM || typ == SOCK_SEQPACKET) {
		if (sk->sk_state == TIPC_DISCONNECTING)
			return -EPIPE;
		else if (!tipc_sk_connected(sk))
			return -ENOTCONN;
	}
	if (!*timeout)
		return -EAGAIN;
	if (signal_pending(current))
		return sock_intr_errno(*timeout);

	return 0;
}

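/* tipc_wait_for_cond(): sleep on the socket's wait queue, with the socket
 * lock released, until condition_ holds, a socket error occurs, the timeout
 * expires or a signal arrives; evaluates to 0 or a negative errno
 */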
#define tipc_wait_for_cond(sock_, timeo_, condition_)			\
({									\
	DEFINE_WAIT_FUNC(wait_, woken_wake_function);			\
	struct sock *sk_;						\
	int rc_;							\
									\
	while ((rc_ = !(condition_))) {					\
		/* coupled with smp_wmb() in tipc_sk_proto_rcv() */	\
		smp_rmb();						\
		sk_ = (sock_)->sk;					\
		rc_ = tipc_sk_sock_err((sock_), timeo_);		\
		if (rc_)						\
			break;						\
		add_wait_queue(sk_sleep(sk_), &wait_);			\
		release_sock(sk_);					\
		*(timeo_) = wait_woken(&wait_, TASK_INTERRUPTIBLE, *(timeo_)); \
		sched_annotate_sleep();					\
		lock_sock(sk_);						\
		remove_wait_queue(sk_sleep(sk_), &wait_);		\
	}								\
	rc_;								\
})

/**
 * tipc_sk_create - create a TIPC socket
 * @net: network namespace (must be default network)
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 * @kern: caused by kernel or by userspace?
 *
 * This routine creates additional data structures used by the TIPC socket,
 * initializes them, and links them together.
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_sk_create(struct net *net, struct socket *sock,
			  int protocol, int kern)
{
	const struct proto_ops *ops;
	struct sock *sk;
	struct tipc_sock *tsk;
	struct tipc_msg *msg;

	/* Validate arguments */
	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	switch (sock->type) {
	case SOCK_STREAM:
		ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		ops = &packet_ops;
		break;
	case SOCK_DGRAM:
	case SOCK_RDM:
		ops = &msg_ops;
		break;
	default:
		return -EPROTOTYPE;
	}

	/* Allocate socket's protocol area */
	sk = sk_alloc(net, AF_TIPC, GFP_KERNEL, &tipc_proto, kern);
	if (sk == NULL)
		return -ENOMEM;

	tsk = tipc_sk(sk);
	tsk->max_pkt = MAX_PKT_DEFAULT;
	tsk->maxnagle = 0;
	tsk->nagle_start = NAGLE_START_INIT;
	INIT_LIST_HEAD(&tsk->publications);
	INIT_LIST_HEAD(&tsk->cong_links);
	msg = &tsk->phdr;

	/* Finish initializing socket data structures */
	sock->ops = ops;
	sock_init_data(sock, sk);
	tipc_set_sk_state(sk, TIPC_OPEN);
	if (tipc_sk_insert(tsk)) {
		pr_warn("Socket create failed; port number exhausted\n");
		return -EINVAL;
	}

	/* Ensure tsk is visible before we read own_addr. */
	smp_mb();

	tipc_msg_init(tipc_own_addr(net), msg, TIPC_LOW_IMPORTANCE,
		      TIPC_NAMED_MSG, NAMED_H_SIZE, 0);

	msg_set_origport(msg, tsk->portid);
	timer_setup(&sk->sk_timer, tipc_sk_timeout, 0);
	sk->sk_shutdown = 0;
	sk->sk_backlog_rcv = tipc_sk_backlog_rcv;
	sk->sk_rcvbuf = sysctl_tipc_rmem[1];
	sk->sk_data_ready = tipc_data_ready;
	sk->sk_write_space = tipc_write_space;
	sk->sk_destruct = tipc_sock_destruct;
	tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
	tsk->group_is_open = true;
	atomic_set(&tsk->dupl_rcvcnt, 0);

	/* Start out with safe limits until we receive an advertised window */
	tsk->snd_win = tsk_adv_blocks(RCVBUF_MIN);
	tsk->rcv_win = tsk->snd_win;

	if (tipc_sk_type_connectionless(sk)) {
		tsk_set_unreturnable(tsk, true);
		if (sock->type == SOCK_DGRAM)
			tsk_set_unreliable(tsk, true);
	}
	__skb_queue_head_init(&tsk->mc_method.deferredq);
	trace_tipc_sk_create(sk, NULL, TIPC_DUMP_NONE, " ");
	return 0;
}

static void tipc_sk_callback(struct rcu_head *head)
{
	struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);

	sock_put(&tsk->sk);
}

/* Caller should hold socket lock for the socket. */
static void __tipc_shutdown(struct socket *sock, int error)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	long timeout = msecs_to_jiffies(CONN_TIMEOUT_DEFAULT);
	u32 dnode = tsk_peer_node(tsk);
	struct sk_buff *skb;

	/* Avoid that hi-prio shutdown msgs bypass msgs in link wakeup queue */
	tipc_wait_for_cond(sock, &timeout, (!tsk->cong_link_cnt &&
					    !tsk_conn_cong(tsk)));

	/* Push out delayed messages if in Nagle mode */
	tipc_sk_push_backlog(tsk, false);
	/* Remove pending SYN */
	__skb_queue_purge(&sk->sk_write_queue);

	/* Remove partially received buffer if any */
	skb = skb_peek(&sk->sk_receive_queue);
	if (skb && TIPC_SKB_CB(skb)->bytes_read) {
		__skb_unlink(skb, &sk->sk_receive_queue);
		kfree_skb(skb);
	}

	/* Reject all unreceived messages if connectionless */
	if (tipc_sk_type_connectionless(sk)) {
		tsk_rej_rx_queue(sk, error);
		return;
	}

	switch (sk->sk_state) {
	case TIPC_CONNECTING:
	case TIPC_ESTABLISHED:
		tipc_set_sk_state(sk, TIPC_DISCONNECTING);
		tipc_node_remove_conn(net, dnode, tsk->portid);
		/* Send a FIN+/- to its peer */
		skb = __skb_dequeue(&sk->sk_receive_queue);
		if (skb) {
			__skb_queue_purge(&sk->sk_receive_queue);
			tipc_sk_respond(sk, skb, error);
			break;
		}
		skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
				      TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
				      tsk_own_node(tsk), tsk_peer_port(tsk),
				      tsk->portid, error);
		if (skb)
			tipc_node_xmit_skb(net, skb, dnode, tsk->portid);
		break;
	case TIPC_LISTEN:
		/* Reject all SYN messages */
		tsk_rej_rx_queue(sk, error);
		break;
	default:
		__skb_queue_purge(&sk->sk_receive_queue);
		break;
	}
}

/**
 * tipc_release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded.  (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender!  They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Return: 0 on success, errno otherwise
 */
static int tipc_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk;

	/*
	 * Exit if socket isn't fully initialized (occurs when a failed accept()
	 * releases a pre-allocated child socket that was never used)
	 */
	if (sk == NULL)
		return 0;

	tsk = tipc_sk(sk);
	lock_sock(sk);

	trace_tipc_sk_release(sk, NULL, TIPC_DUMP_ALL, " ");
	__tipc_shutdown(sock, TIPC_ERR_NO_PORT);
	sk->sk_shutdown = SHUTDOWN_MASK;
	tipc_sk_leave(tsk);
	tipc_sk_withdraw(tsk, 0, NULL);
	__skb_queue_purge(&tsk->mc_method.deferredq);
	sk_stop_timer(sk, &sk->sk_timer);
	tipc_sk_remove(tsk);

	sock_orphan(sk);
	/* Reject any messages that accumulated in backlog queue */
	release_sock(sk);
	tipc_dest_list_purge(&tsk->cong_links);
	tsk->cong_link_cnt = 0;
	call_rcu(&tsk->rcu, tipc_sk_callback);
	sock->sk = NULL;

	return 0;
}

/**
 * __tipc_bind - associate or disassociate TIPC name(s) with a socket
 * @sock: socket structure
 * @skaddr: socket address describing name(s) and desired operation
 * @alen: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Return: 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it doesn't
 * access any non-constant socket information.
 */
static int __tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)skaddr;
	struct tipc_sock *tsk = tipc_sk(sock->sk);

	if (unlikely(!alen))
		return tipc_sk_withdraw(tsk, 0, NULL);

	if (addr->addrtype == TIPC_SERVICE_ADDR)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;

	if (tsk->group)
		return -EACCES;

	if (addr->scope >= 0)
		return tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq);
	else
		return tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
}

int tipc_sk_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	int res;

	lock_sock(sock->sk);
	res = __tipc_bind(sock, skaddr, alen);
	release_sock(sock->sk);
	return res;
}

static int tipc_bind(struct socket *sock, struct sockaddr *skaddr, int alen)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)skaddr;

	if (alen) {
		if (alen < sizeof(struct sockaddr_tipc))
			return -EINVAL;
		if (addr->family != AF_TIPC)
			return -EAFNOSUPPORT;
		if (addr->addrtype > TIPC_SERVICE_ADDR)
			return -EAFNOSUPPORT;
		if (addr->addr.nameseq.type < TIPC_RESERVED_TYPES) {
			pr_warn_once("Can't bind to reserved service type %u\n",
				     addr->addr.nameseq.type);
			return -EACCES;
		}
	}
	return tipc_sk_bind(sock, skaddr, alen);
}

/**
 * tipc_getname - get port ID of socket or peer socket
 * @sock: socket structure
 * @uaddr: area for returned socket address
 * @peer: 0 = own ID, 1 = current peer ID, 2 = current/former peer ID
 *
 * Return: 0 on success, errno otherwise
 *
 * NOTE: This routine doesn't need to take the socket lock since it only
 *       accesses socket information that is unchanging (or which changes in
 *       a completely predictable manner).
 */
static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
			int peer)
{
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);

	memset(addr, 0, sizeof(*addr));
	if (peer) {
		if ((!tipc_sk_connected(sk)) &&
		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
			return -ENOTCONN;
		addr->addr.id.ref = tsk_peer_port(tsk);
		addr->addr.id.node = tsk_peer_node(tsk);
	} else {
		addr->addr.id.ref = tsk->portid;
		addr->addr.id.node = tipc_own_addr(sock_net(sk));
	}

	addr->addrtype = TIPC_SOCKET_ADDR;
	addr->family = AF_TIPC;
	addr->scope = 0;
	addr->addr.name.domain = 0;

	return sizeof(*addr);
}

/**
 * tipc_poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table
 *
 * Return: pollmask value
 *
 * COMMENTARY:
 * It appears that the usual socket locking mechanisms are not useful here
 * since the pollmask info is potentially out-of-date the moment this routine
 * exits.  TCP and other protocols seem to rely on higher level poll routines
 * to handle any preventable race conditions, so TIPC will do the same ...
 *
 * IMPORTANT: The fact that a read or write operation is indicated does NOT
 * imply that the operation will succeed, merely that it should be performed
 * and will not block.
 */
static __poll_t tipc_poll(struct file *file, struct socket *sock,
			  poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	__poll_t revents = 0;

	sock_poll_wait(file, sock, wait);
	trace_tipc_sk_poll(sk, NULL, TIPC_DUMP_ALL, " ");

	if (sk->sk_shutdown & RCV_SHUTDOWN)
		revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
	if (sk->sk_shutdown == SHUTDOWN_MASK)
		revents |= EPOLLHUP;

	switch (sk->sk_state) {
	case TIPC_ESTABLISHED:
		if (!tsk->cong_link_cnt && !tsk_conn_cong(tsk))
			revents |= EPOLLOUT;
		fallthrough;
	case TIPC_LISTEN:
	case TIPC_CONNECTING:
		if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
			revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_OPEN:
		if (tsk->group_is_open && !tsk->cong_link_cnt)
			revents |= EPOLLOUT;
		if (!tipc_sk_type_connectionless(sk))
			break;
		if (skb_queue_empty_lockless(&sk->sk_receive_queue))
			break;
		revents |= EPOLLIN | EPOLLRDNORM;
		break;
	case TIPC_DISCONNECTING:
		revents = EPOLLIN | EPOLLRDNORM | EPOLLHUP;
		break;
	}
	return revents;
}

/**
 * tipc_sendmcast - send multicast message
 * @sock: socket structure
 * @seq: destination address
 * @msg: message to send
 * @dlen: length of data to send
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_sendmcast(struct socket *sock, struct tipc_service_range *seq,
			  struct msghdr *msg, size_t dlen, long timeout)
{
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_msg *hdr = &tsk->phdr;
	struct net *net = sock_net(sk);
	int mtu = tipc_bcast_get_mtu(net);
	struct tipc_mc_method *method = &tsk->mc_method;
	struct sk_buff_head pkts;
	struct tipc_nlist dsts;
	int rc;

	if (tsk->group)
		return -EACCES;

	/* Block or return if any destination link is congested */
	rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Lookup destination nodes */
	tipc_nlist_init(&dsts, tipc_own_addr(net));
	tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
				      seq->upper, &dsts);
	if (!dsts.local && !dsts.remote)
		return -EHOSTUNREACH;

	/* Build message header */
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);

	/* Send message if build was successful */
	if (unlikely(rc == dlen)) {
		trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),
					TIPC_DUMP_SK_SNDQ, " ");
		rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
				     &tsk->cong_link_cnt);
	}

	tipc_nlist_purge(&dsts);

	return rc ? rc : dlen;
}

Jon Maloy27bd9ec2017-10-13 11:04:27 +0200893 * tipc_send_group_msg - send a message to a member in the group
894 * @net: network namespace
Randy Dunlapf172f4b2020-11-29 10:32:49 -0800895 * @tsk: tipc socket
Jon Maloy27bd9ec2017-10-13 11:04:27 +0200896 * @m: message to send
897 * @mb: group member
898 * @dnode: destination node
899 * @dport: destination port
900 * @dlen: total length of message data
901 */
902static int tipc_send_group_msg(struct net *net, struct tipc_sock *tsk,
903 struct msghdr *m, struct tipc_member *mb,
904 u32 dnode, u32 dport, int dlen)
905{
Jon Maloyb87a5ea2017-10-13 11:04:30 +0200906 u16 bc_snd_nxt = tipc_group_bc_snd_nxt(tsk->group);
Jon Maloy2f487712017-10-13 11:04:31 +0200907 struct tipc_mc_method *method = &tsk->mc_method;
Jon Maloy27bd9ec2017-10-13 11:04:27 +0200908 int blks = tsk_blocks(GROUP_H_SIZE + dlen);
909 struct tipc_msg *hdr = &tsk->phdr;
910 struct sk_buff_head pkts;
911 int mtu, rc;
912
913 /* Complete message header */
914 msg_set_type(hdr, TIPC_GRP_UCAST_MSG);
915 msg_set_hdr_sz(hdr, GROUP_H_SIZE);
916 msg_set_destport(hdr, dport);
917 msg_set_destnode(hdr, dnode);
Jon Maloyb87a5ea2017-10-13 11:04:30 +0200918 msg_set_grp_bc_seqno(hdr, bc_snd_nxt);
Jon Maloy27bd9ec2017-10-13 11:04:27 +0200919
920 /* Build message as chain of buffers */
Jon Maloye654f9f2019-08-15 16:42:50 +0200921 __skb_queue_head_init(&pkts);
Hoang Lef73b1282019-10-29 07:51:21 +0700922 mtu = tipc_node_get_mtu(net, dnode, tsk->portid, false);
Jon Maloy27bd9ec2017-10-13 11:04:27 +0200923 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
924 if (unlikely(rc != dlen))
925 return rc;
926
927 /* Send message */
928 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
929 if (unlikely(rc == -ELINKCONG)) {
930 tipc_dest_push(&tsk->cong_links, dnode, 0);
931 tsk->cong_link_cnt++;
932 }
933
Jon Maloy2f487712017-10-13 11:04:31 +0200934 /* Update send window */
Jon Maloy27bd9ec2017-10-13 11:04:27 +0200935 tipc_group_update_member(mb, blks);
936
Jon Maloy2f487712017-10-13 11:04:31 +0200937 /* A broadcast sent within next EXPIRE period must follow same path */
938 method->rcast = true;
939 method->mandatory = true;
Jon Maloy27bd9ec2017-10-13 11:04:27 +0200940 return dlen;
941}
942
/**
 * tipc_send_group_unicast - send message to a member in the group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_unicast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct net *net = sock_net(sk);
	struct tipc_member *mb = NULL;
	u32 node, port;
	int rc;

	node = dest->addr.id.node;
	port = dest->addr.id.ref;
	if (!port && !node)
		return -EHOSTUNREACH;

	/* Block or return if destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tipc_dest_find(&tsk->cong_links, node, 0) &&
				tsk->group &&
				!tipc_group_cong(tsk->group, node, port, blks,
						 &mb));
	if (unlikely(rc))
		return rc;

	if (unlikely(!mb))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mb, node, port, dlen);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_anycast - send message to any member with given identity
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_anycast(struct socket *sock, struct msghdr *m,
				   int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct tipc_sock *tsk = tipc_sk(sk);
	struct list_head *cong_links = &tsk->cong_links;
	int blks = tsk_blocks(GROUP_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	struct tipc_member *first = NULL;
	struct tipc_member *mbr = NULL;
	struct net *net = sock_net(sk);
	u32 node, port, exclude;
	struct list_head dsts;
	u32 type, inst, scope;
	int lookups = 0;
	int dstcnt, rc;
	bool cong;

	INIT_LIST_HEAD(&dsts);

	type = msg_nametype(hdr);
	inst = dest->addr.name.name.instance;
	scope = msg_lookup_scope(hdr);

	while (++lookups < 4) {
		exclude = tipc_group_exclude(tsk->group);

		first = NULL;

		/* Look for a non-congested destination member, if any */
		while (1) {
			if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
						 &dstcnt, exclude, false))
				return -EHOSTUNREACH;
			tipc_dest_pop(&dsts, &node, &port);
			cong = tipc_group_cong(tsk->group, node, port, blks,
					       &mbr);
			if (!cong)
				break;
			if (mbr == first)
				break;
			if (!first)
				first = mbr;
		}

		/* Start over if destination was not in member list */
		if (unlikely(!mbr))
			continue;

		if (likely(!cong && !tipc_dest_find(cong_links, node, 0)))
			break;

		/* Block or return if destination link or member is congested */
		rc = tipc_wait_for_cond(sock, &timeout,
					!tipc_dest_find(cong_links, node, 0) &&
					tsk->group &&
					!tipc_group_cong(tsk->group, node, port,
							 blks, &mbr));
		if (unlikely(rc))
			return rc;

		/* Send, unless destination disappeared while waiting */
		if (likely(mbr))
			break;
	}

	if (unlikely(lookups >= 4))
		return -EHOSTUNREACH;

	rc = tipc_send_group_msg(net, tsk, m, mbr, node, port, dlen);

	return rc ? rc : dlen;
}

/**
 * tipc_send_group_bcast - send message to all members in communication group
 * @sock: socket structure
 * @m: message to send
 * @dlen: total length of message data
 * @timeout: timeout to wait for wakeup
 *
 * Called from function tipc_sendmsg(), which has done all sanity checks
 * Return: the number of bytes sent on success, or errno
 */
static int tipc_send_group_bcast(struct socket *sock, struct msghdr *m,
				 int dlen, long timeout)
{
	DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct tipc_sock *tsk = tipc_sk(sk);
	struct tipc_nlist *dsts;
	struct tipc_mc_method *method = &tsk->mc_method;
	bool ack = method->mandatory && method->rcast;
	int blks = tsk_blocks(MCAST_H_SIZE + dlen);
	struct tipc_msg *hdr = &tsk->phdr;
	int mtu = tipc_bcast_get_mtu(net);
	struct sk_buff_head pkts;
	int rc = -EHOSTUNREACH;

	/* Block or return if any destination link or member is congested */
	rc = tipc_wait_for_cond(sock, &timeout,
				!tsk->cong_link_cnt && tsk->group &&
				!tipc_group_bc_cong(tsk->group, blks));
	if (unlikely(rc))
		return rc;

	dsts = tipc_group_dests(tsk->group);
	if (!dsts->local && !dsts->remote)
		return -EHOSTUNREACH;

	/* Complete message header */
	if (dest) {
		msg_set_type(hdr, TIPC_GRP_MCAST_MSG);
		msg_set_nameinst(hdr, dest->addr.name.name.instance);
	} else {
		msg_set_type(hdr, TIPC_GRP_BCAST_MSG);
		msg_set_nameinst(hdr, 0);
	}
	msg_set_hdr_sz(hdr, GROUP_H_SIZE);
	msg_set_destport(hdr, 0);
	msg_set_destnode(hdr, 0);
	msg_set_grp_bc_seqno(hdr, tipc_group_bc_snd_nxt(tsk->group));

	/* Avoid getting stuck with repeated forced replicasts */
	msg_set_grp_bc_ack_req(hdr, ack);

	/* Build message as chain of buffers */
	__skb_queue_head_init(&pkts);
	rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
	if (unlikely(rc != dlen))
		return rc;

	/* Send message */
	rc = tipc_mcast_xmit(net, &pkts, method, dsts, &tsk->cong_link_cnt);
	if (unlikely(rc))
		return rc;

	/* Update broadcast sequence number and send windows */
	tipc_group_update_bc_members(tsk->group, blks, ack);

	/* Broadcast link is now free to choose method for next broadcast */
	method->mandatory = false;
	method->expires = jiffies;

	return dlen;
1144}
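/*
 * Note: from user space this broadcast path is reached by a send() or
 * sendmsg() without destination address on a socket that has joined a
 * group, whereas a destination of type TIPC_ADDR_MCAST selects the
 * multicast variant handled by tipc_send_group_mcast() below (see the
 * dispatch at the top of __tipc_sendmsg()).
 */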
1145
1146/**
Jon Maloy5b8dddb2017-10-13 11:04:29 +02001147 * tipc_send_group_mcast - send message to all members with given identity
1148 * @sock: socket structure
1149 * @m: message to send
1150 * @dlen: total length of message data
1151 * @timeout: timeout to wait for wakeup
1152 *
1153 * Called from function tipc_sendmsg(), which has done all sanity checks
Randy Dunlap637b77f2020-11-29 10:32:48 -08001154 * Return: the number of bytes sent on success, or errno
Jon Maloy5b8dddb2017-10-13 11:04:29 +02001155 */
1156static int tipc_send_group_mcast(struct socket *sock, struct msghdr *m,
1157 int dlen, long timeout)
1158{
1159 struct sock *sk = sock->sk;
1160 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
Jon Maloy5b8dddb2017-10-13 11:04:29 +02001161 struct tipc_sock *tsk = tipc_sk(sk);
1162 struct tipc_group *grp = tsk->group;
Jon Maloy232d07b2018-01-08 21:03:30 +01001163 struct tipc_msg *hdr = &tsk->phdr;
Jon Maloy5b8dddb2017-10-13 11:04:29 +02001164 struct net *net = sock_net(sk);
Jon Maloy232d07b2018-01-08 21:03:30 +01001165 u32 type, inst, scope, exclude;
Jon Maloy5b8dddb2017-10-13 11:04:29 +02001166 struct list_head dsts;
Jon Maloy232d07b2018-01-08 21:03:30 +01001167 u32 dstcnt;
Jon Maloy5b8dddb2017-10-13 11:04:29 +02001168
1169 INIT_LIST_HEAD(&dsts);
1170
Jon Maloy232d07b2018-01-08 21:03:30 +01001171 type = msg_nametype(hdr);
1172 inst = dest->addr.name.name.instance;
1173 scope = msg_lookup_scope(hdr);
Jon Maloy5b8dddb2017-10-13 11:04:29 +02001174 exclude = tipc_group_exclude(grp);
Jon Maloy232d07b2018-01-08 21:03:30 +01001175
1176 if (!tipc_nametbl_lookup(net, type, inst, scope, &dsts,
1177 &dstcnt, exclude, true))
Jon Maloy5b8dddb2017-10-13 11:04:29 +02001178 return -EHOSTUNREACH;
1179
1180 if (dstcnt == 1) {
1181 tipc_dest_pop(&dsts, &dest->addr.id.node, &dest->addr.id.ref);
1182 return tipc_send_group_unicast(sock, m, dlen, timeout);
1183 }
1184
1185 tipc_dest_list_purge(&dsts);
1186 return tipc_send_group_bcast(sock, m, dlen, timeout);
1187}
1188
1189/**
Jon Paul Maloycb1b7282015-02-05 08:36:44 -05001190 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
Randy Dunlapf172f4b2020-11-29 10:32:49 -08001191 * @net: the associated network namespace
Jon Paul Maloycb1b7282015-02-05 08:36:44 -05001192 * @arrvq: queue with arriving messages, to be cloned after destination lookup
1193 * @inputq: queue with cloned messages, delivered to socket after dest lookup
1194 *
1195 * Multi-threaded: parallel calls with reference to same queues may occur
Jon Paul Maloy078bec82014-07-16 20:41:00 -04001196 */
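/*
 * Note on the handshake below: several callers may clone the same buffer
 * at the head of @arrvq in parallel, but only the caller that still finds
 * that buffer at the queue head - checked under @inputq's lock - splices
 * its clones into @inputq and dequeues the original; the other callers
 * simply drop their private clones, so each arriving message should be
 * delivered exactly once.
 */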
Jon Paul Maloycb1b7282015-02-05 08:36:44 -05001197void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
1198 struct sk_buff_head *inputq)
Jon Paul Maloy078bec82014-07-16 20:41:00 -04001199{
Jon Maloy75da2162017-10-13 11:04:23 +02001200 u32 self = tipc_own_addr(net);
Jon Maloy232d07b2018-01-08 21:03:30 +01001201 u32 type, lower, upper, scope;
Jon Paul Maloycb1b7282015-02-05 08:36:44 -05001202 struct sk_buff *skb, *_skb;
Colin Ian Kingb053fcc2018-07-31 17:01:37 +01001203 u32 portid, onode;
Jon Maloy232d07b2018-01-08 21:03:30 +01001204 struct sk_buff_head tmpq;
Jon Maloy75da2162017-10-13 11:04:23 +02001205 struct list_head dports;
Jon Maloy232d07b2018-01-08 21:03:30 +01001206 struct tipc_msg *hdr;
1207 int user, mtyp, hlen;
1208 bool exact;
Jon Paul Maloy3c724ac2015-02-05 08:36:43 -05001209
Jon Paul Maloycb1b7282015-02-05 08:36:44 -05001210 __skb_queue_head_init(&tmpq);
Jon Paul Maloy4d8642d2017-01-03 10:55:10 -05001211 INIT_LIST_HEAD(&dports);
Jon Paul Maloy078bec82014-07-16 20:41:00 -04001212
Jon Paul Maloycb1b7282015-02-05 08:36:44 -05001213 skb = tipc_skb_peek(arrvq, &inputq->lock);
1214 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
Jon Maloy232d07b2018-01-08 21:03:30 +01001215 hdr = buf_msg(skb);
1216 user = msg_user(hdr);
1217 mtyp = msg_type(hdr);
1218 hlen = skb_headroom(skb) + msg_hdr_sz(hdr);
Jon Maloy232d07b2018-01-08 21:03:30 +01001219 onode = msg_orignode(hdr);
1220 type = msg_nametype(hdr);
1221
Jon Maloy2f487712017-10-13 11:04:31 +02001222 if (mtyp == TIPC_GRP_UCAST_MSG || user == GROUP_PROTOCOL) {
1223 spin_lock_bh(&inputq->lock);
1224 if (skb_peek(arrvq) == skb) {
1225 __skb_dequeue(arrvq);
1226 __skb_queue_tail(inputq, skb);
1227 }
Jon Maloyc545a942017-12-11 19:11:55 +01001228 kfree_skb(skb);
Jon Maloy2f487712017-10-13 11:04:31 +02001229 spin_unlock_bh(&inputq->lock);
1230 continue;
1231 }
Jon Paul Maloycb1b7282015-02-05 08:36:44 -05001232
Jon Maloy232d07b2018-01-08 21:03:30 +01001233 /* Group messages require exact scope match */
1234 if (msg_in_group(hdr)) {
1235 lower = 0;
1236 upper = ~0;
1237 scope = msg_lookup_scope(hdr);
1238 exact = true;
1239 } else {
1240 /* TIPC_NODE_SCOPE means "any scope" in this context */
1241 if (onode == self)
1242 scope = TIPC_NODE_SCOPE;
1243 else
1244 scope = TIPC_CLUSTER_SCOPE;
1245 exact = false;
1246 lower = msg_namelower(hdr);
1247 upper = msg_nameupper(hdr);
Jon Maloy75da2162017-10-13 11:04:23 +02001248 }
Jon Maloy232d07b2018-01-08 21:03:30 +01001249
1250 /* Create destination port list: */
1251 tipc_nametbl_mc_lookup(net, type, lower, upper,
1252 scope, exact, &dports);
1253
1254 /* Clone message per destination */
Jon Maloya80ae532017-10-13 11:04:22 +02001255 while (tipc_dest_pop(&dports, NULL, &portid)) {
Jon Maloy232d07b2018-01-08 21:03:30 +01001256 _skb = __pskb_copy(skb, hlen, GFP_ATOMIC);
Jon Paul Maloycb1b7282015-02-05 08:36:44 -05001257 if (_skb) {
1258 msg_set_destport(buf_msg(_skb), portid);
1259 __skb_queue_tail(&tmpq, _skb);
1260 continue;
1261 }
1262 pr_warn("Failed to clone mcast rcv buffer\n");
Jon Paul Maloy078bec82014-07-16 20:41:00 -04001263 }
Jon Paul Maloycb1b7282015-02-05 08:36:44 -05001264 /* Append to inputq if not already done by other thread */
1265 spin_lock_bh(&inputq->lock);
1266 if (skb_peek(arrvq) == skb) {
1267 skb_queue_splice_tail_init(&tmpq, inputq);
1268 kfree_skb(__skb_dequeue(arrvq));
1269 }
1270 spin_unlock_bh(&inputq->lock);
1271 __skb_queue_purge(&tmpq);
1272 kfree_skb(skb);
Jon Paul Maloy078bec82014-07-16 20:41:00 -04001273 }
Jon Paul Maloycb1b7282015-02-05 08:36:44 -05001274 tipc_sk_rcv(net, inputq);
Jon Paul Maloy078bec82014-07-16 20:41:00 -04001275}
1276
Jon Maloyc0bceb92019-10-30 14:00:41 +01001277/* tipc_sk_push_backlog(): send accumulated buffers in socket write queue
1278 * when socket is in Nagle mode
1279 */
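/* The ratio between accumulated user messages (msg_acc) and packets
 * actually sent (pkt_cnt) is used as feedback when a nagle ack arrives:
 * a ratio below 2 means bundling is not paying off, so the oneway flag
 * is cleared and the threshold for re-entering Nagle mode (nagle_start)
 * is doubled, up to NAGLE_START_MAX; a good ratio resets nagle_start to
 * NAGLE_START_INIT and requests a new ack on the tail buffer.
 */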
Tuong Lien0a3e0602020-05-26 16:38:38 +07001280static void tipc_sk_push_backlog(struct tipc_sock *tsk, bool nagle_ack)
Jon Maloyc0bceb92019-10-30 14:00:41 +01001281{
1282 struct sk_buff_head *txq = &tsk->sk.sk_write_queue;
Tuong Lien0a3e0602020-05-26 16:38:38 +07001283 struct sk_buff *skb = skb_peek_tail(txq);
Jon Maloyc0bceb92019-10-30 14:00:41 +01001284 struct net *net = sock_net(&tsk->sk);
1285 u32 dnode = tsk_peer_node(tsk);
1286 int rc;
1287
Tuong Lien0a3e0602020-05-26 16:38:38 +07001288 if (nagle_ack) {
1289 tsk->pkt_cnt += skb_queue_len(txq);
1290 if (!tsk->pkt_cnt || tsk->msg_acc / tsk->pkt_cnt < 2) {
1291 tsk->oneway = 0;
1292 if (tsk->nagle_start < NAGLE_START_MAX)
1293 tsk->nagle_start *= 2;
1294 tsk->expect_ack = false;
1295 pr_debug("tsk %10u: bad nagle %u -> %u, next start %u!\n",
1296 tsk->portid, tsk->msg_acc, tsk->pkt_cnt,
1297 tsk->nagle_start);
1298 } else {
1299 tsk->nagle_start = NAGLE_START_INIT;
1300 if (skb) {
1301 msg_set_ack_required(buf_msg(skb));
1302 tsk->expect_ack = true;
1303 } else {
1304 tsk->expect_ack = false;
1305 }
1306 }
1307 tsk->msg_acc = 0;
1308 tsk->pkt_cnt = 0;
1309 }
1310
Tung Nguyend34910e2019-11-28 10:10:08 +07001311 if (!skb || tsk->cong_link_cnt)
1312 return;
1313
1314 /* Do not send SYN again after congestion */
1315 if (msg_is_syn(buf_msg(skb)))
Jon Maloyc0bceb92019-10-30 14:00:41 +01001316 return;
1317
Tuong Lien0a3e0602020-05-26 16:38:38 +07001318 if (tsk->msg_acc)
1319 tsk->pkt_cnt += skb_queue_len(txq);
Jon Maloyc0bceb92019-10-30 14:00:41 +01001320 tsk->snt_unacked += tsk->snd_backlog;
1321 tsk->snd_backlog = 0;
Jon Maloyc0bceb92019-10-30 14:00:41 +01001322 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
1323 if (rc == -ELINKCONG)
1324 tsk->cong_link_cnt = 1;
1325}
1326
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001327/**
Jon Maloy64ac5f52017-10-13 11:04:20 +02001328 * tipc_sk_conn_proto_rcv - receive a connection management protocol message
Jon Paul Maloyac0074e2014-06-25 20:41:41 -05001329 * @tsk: receiving socket
Jon Paul Maloybcd3ffd2015-07-22 10:11:19 -04001330 * @skb: pointer to message buffer.
Randy Dunlapf172f4b2020-11-29 10:32:49 -08001331 * @inputq: buffer list containing the buffers
1332 * @xmitq: output message area
Jon Paul Maloyac0074e2014-06-25 20:41:41 -05001333 */
Jon Maloy64ac5f52017-10-13 11:04:20 +02001334static void tipc_sk_conn_proto_rcv(struct tipc_sock *tsk, struct sk_buff *skb,
Parthasarathy Bhuvaragane7eb0582018-10-10 17:50:23 +02001335 struct sk_buff_head *inputq,
Jon Maloy64ac5f52017-10-13 11:04:20 +02001336 struct sk_buff_head *xmitq)
Jon Paul Maloyac0074e2014-06-25 20:41:41 -05001337{
Jon Paul Maloybcd3ffd2015-07-22 10:11:19 -04001338 struct tipc_msg *hdr = buf_msg(skb);
Jon Maloy64ac5f52017-10-13 11:04:20 +02001339 u32 onode = tsk_own_node(tsk);
1340 struct sock *sk = &tsk->sk;
Jon Paul Maloybcd3ffd2015-07-22 10:11:19 -04001341 int mtyp = msg_type(hdr);
Jon Maloyc0bceb92019-10-30 14:00:41 +01001342 bool was_cong;
Jon Paul Maloybcd3ffd2015-07-22 10:11:19 -04001343
Jon Paul Maloyac0074e2014-06-25 20:41:41 -05001344 /* Ignore if connection cannot be validated: */
Tuong Lien01e661e2018-12-19 09:17:58 +07001345 if (!tsk_peer_msg(tsk, hdr)) {
1346 trace_tipc_sk_drop_msg(sk, skb, TIPC_DUMP_NONE, "@proto_rcv!");
Jon Paul Maloyac0074e2014-06-25 20:41:41 -05001347 goto exit;
Tuong Lien01e661e2018-12-19 09:17:58 +07001348 }
Jon Paul Maloyac0074e2014-06-25 20:41:41 -05001349
Parthasarathy Bhuvaraganc1be7752017-04-26 10:05:02 +02001350 if (unlikely(msg_errcode(hdr))) {
1351 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
1352 tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
1353 tsk_peer_port(tsk));
1354 sk->sk_state_change(sk);
Parthasarathy Bhuvaragane7eb0582018-10-10 17:50:23 +02001355
 1356 /* State change is ignored if socket already awake;
 1357 * - convert msg to abort msg and add to inputq
1358 */
1359 msg_set_user(hdr, TIPC_CRITICAL_IMPORTANCE);
1360 msg_set_type(hdr, TIPC_CONN_MSG);
1361 msg_set_size(hdr, BASIC_H_SIZE);
1362 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1363 __skb_queue_tail(inputq, skb);
1364 return;
Parthasarathy Bhuvaraganc1be7752017-04-26 10:05:02 +02001365 }
1366
Parthasarathy Bhuvaragan8ea642e2016-11-01 14:02:44 +01001367 tsk->probe_unacked = false;
Jon Paul Maloyac0074e2014-06-25 20:41:41 -05001368
Jon Paul Maloybcd3ffd2015-07-22 10:11:19 -04001369 if (mtyp == CONN_PROBE) {
1370 msg_set_type(hdr, CONN_PROBE_REPLY);
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04001371 if (tipc_msg_reverse(onode, &skb, TIPC_OK))
1372 __skb_queue_tail(xmitq, skb);
Jon Paul Maloybcd3ffd2015-07-22 10:11:19 -04001373 return;
1374 } else if (mtyp == CONN_ACK) {
Jon Maloyc0bceb92019-10-30 14:00:41 +01001375 was_cong = tsk_conn_cong(tsk);
Tuong Lien0a3e0602020-05-26 16:38:38 +07001376 tipc_sk_push_backlog(tsk, msg_nagle_ack(hdr));
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04001377 tsk->snt_unacked -= msg_conn_ack(hdr);
1378 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1379 tsk->snd_win = msg_adv_win(hdr);
Jon Maloyc0bceb92019-10-30 14:00:41 +01001380 if (was_cong && !tsk_conn_cong(tsk))
Jon Paul Maloybcd3ffd2015-07-22 10:11:19 -04001381 sk->sk_write_space(sk);
1382 } else if (mtyp != CONN_PROBE_REPLY) {
1383 pr_warn("Received unknown CONN_PROTO msg\n");
Jon Paul Maloyac0074e2014-06-25 20:41:41 -05001384 }
Jon Paul Maloyac0074e2014-06-25 20:41:41 -05001385exit:
Jon Paul Maloybcd3ffd2015-07-22 10:11:19 -04001386 kfree_skb(skb);
Jon Paul Maloyac0074e2014-06-25 20:41:41 -05001387}
1388
Jon Paul Maloye2dafe82014-06-25 20:41:37 -05001389/**
Ying Xue247f0f32014-02-18 16:06:46 +08001390 * tipc_sendmsg - send message in connectionless manner
Per Lidenb97bf3f2006-01-02 19:04:38 +01001391 * @sock: socket structure
1392 * @m: message to send
Jon Paul Maloye2dafe82014-06-25 20:41:37 -05001393 * @dsz: amount of user data to be sent
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001394 *
Per Lidenb97bf3f2006-01-02 19:04:38 +01001395 * Message must have a destination specified explicitly.
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001396 * Used for SOCK_RDM and SOCK_DGRAM messages,
Per Lidenb97bf3f2006-01-02 19:04:38 +01001397 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
1398 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001399 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08001400 * Return: the number of bytes sent on success, or errno otherwise
Per Lidenb97bf3f2006-01-02 19:04:38 +01001401 */
Ying Xue1b784142015-03-02 15:37:48 +08001402static int tipc_sendmsg(struct socket *sock,
Jon Paul Maloye2dafe82014-06-25 20:41:37 -05001403 struct msghdr *m, size_t dsz)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001404{
Ying Xue39a0295f2015-03-02 15:37:47 +08001405 struct sock *sk = sock->sk;
1406 int ret;
1407
1408 lock_sock(sk);
1409 ret = __tipc_sendmsg(sock, m, dsz);
1410 release_sock(sk);
1411
1412 return ret;
1413}
1414
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001415static int __tipc_sendmsg(struct socket *sock, struct msghdr *m, size_t dlen)
Ying Xue39a0295f2015-03-02 15:37:47 +08001416{
Allan Stephens0c3141e2008-04-15 00:22:02 -07001417 struct sock *sk = sock->sk;
Ying Xuef2f98002015-01-09 15:27:05 +08001418 struct net *net = sock_net(sk);
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001419 struct tipc_sock *tsk = tipc_sk(sk);
1420 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1421 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
1422 struct list_head *clinks = &tsk->cong_links;
1423 bool syn = !tipc_sk_type_connectionless(sk);
Jon Maloy75da2162017-10-13 11:04:23 +02001424 struct tipc_group *grp = tsk->group;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001425 struct tipc_msg *hdr = &tsk->phdr;
Jon Maloyb6f88d92020-11-25 13:29:15 -05001426 struct tipc_service_range *seq;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001427 struct sk_buff_head pkts;
Tuong Lienabc9b4e2019-12-10 15:21:04 +07001428 u32 dport = 0, dnode = 0;
1429 u32 type = 0, inst = 0;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001430 int mtu, rc;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001431
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001432 if (unlikely(dlen > TIPC_MAX_USER_MSG_SIZE))
Allan Stephensc29c3f72010-04-20 17:58:24 -04001433 return -EMSGSIZE;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001434
Jon Maloy27bd9ec2017-10-13 11:04:27 +02001435 if (likely(dest)) {
1436 if (unlikely(m->msg_namelen < sizeof(*dest)))
1437 return -EINVAL;
1438 if (unlikely(dest->family != AF_TIPC))
1439 return -EINVAL;
1440 }
1441
1442 if (grp) {
1443 if (!dest)
1444 return tipc_send_group_bcast(sock, m, dlen, timeout);
Jon Maloyb6f88d92020-11-25 13:29:15 -05001445 if (dest->addrtype == TIPC_SERVICE_ADDR)
Jon Maloyee106d72017-10-13 11:04:28 +02001446 return tipc_send_group_anycast(sock, m, dlen, timeout);
Jon Maloyb6f88d92020-11-25 13:29:15 -05001447 if (dest->addrtype == TIPC_SOCKET_ADDR)
Jon Maloy27bd9ec2017-10-13 11:04:27 +02001448 return tipc_send_group_unicast(sock, m, dlen, timeout);
Jon Maloy5b8dddb2017-10-13 11:04:29 +02001449 if (dest->addrtype == TIPC_ADDR_MCAST)
1450 return tipc_send_group_mcast(sock, m, dlen, timeout);
Jon Maloy27bd9ec2017-10-13 11:04:27 +02001451 return -EINVAL;
1452 }
Jon Maloy75da2162017-10-13 11:04:23 +02001453
Erik Hugnef2f80362015-03-19 09:02:19 +01001454 if (unlikely(!dest)) {
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001455 dest = &tsk->peer;
Erik Hugne0e632082019-03-04 23:26:10 +01001456 if (!syn && dest->family != AF_TIPC)
Erik Hugnef2f80362015-03-19 09:02:19 +01001457 return -EDESTADDRREQ;
Erik Hugnef2f80362015-03-19 09:02:19 +01001458 }
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001459
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001460 if (unlikely(syn)) {
Parthasarathy Bhuvaragan0c288c82016-11-01 14:02:43 +01001461 if (sk->sk_state == TIPC_LISTEN)
Ying Xue39a0295f2015-03-02 15:37:47 +08001462 return -EPIPE;
Parthasarathy Bhuvaragan438adca2016-11-01 14:02:45 +01001463 if (sk->sk_state != TIPC_OPEN)
Ying Xue39a0295f2015-03-02 15:37:47 +08001464 return -EISCONN;
1465 if (tsk->published)
1466 return -EOPNOTSUPP;
Jon Maloyb6f88d92020-11-25 13:29:15 -05001467 if (dest->addrtype == TIPC_SERVICE_ADDR) {
Jon Paul Maloy301bae52014-08-22 18:09:20 -04001468 tsk->conn_type = dest->addr.name.name.type;
1469 tsk->conn_instance = dest->addr.name.name.instance;
Allan Stephens0c3141e2008-04-15 00:22:02 -07001470 }
Jon Maloy25b92212018-09-28 20:23:21 +02001471 msg_set_syn(hdr, 1);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001472 }
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001473
Erik Hugnef2f80362015-03-19 09:02:19 +01001474 seq = &dest->addr.nameseq;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001475 if (dest->addrtype == TIPC_ADDR_MCAST)
1476 return tipc_sendmcast(sock, seq, m, dlen, timeout);
Jon Paul Maloye2dafe82014-06-25 20:41:37 -05001477
Jon Maloyb6f88d92020-11-25 13:29:15 -05001478 if (dest->addrtype == TIPC_SERVICE_ADDR) {
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001479 type = dest->addr.name.name.type;
1480 inst = dest->addr.name.name.instance;
Jon Maloy928df182018-03-15 16:48:51 +01001481 dnode = dest->addr.name.domain;
Ying Xue4ac1c8d2015-01-09 15:27:09 +08001482 dport = tipc_nametbl_translate(net, type, inst, &dnode);
Ying Xue39a0295f2015-03-02 15:37:47 +08001483 if (unlikely(!dport && !dnode))
1484 return -EHOSTUNREACH;
Jon Maloyb6f88d92020-11-25 13:29:15 -05001485 } else if (dest->addrtype == TIPC_SOCKET_ADDR) {
Jon Paul Maloye2dafe82014-06-25 20:41:37 -05001486 dnode = dest->addr.id.node;
Jon Maloy335b9292018-04-12 01:15:48 +02001487 } else {
1488 return -EINVAL;
Jon Paul Maloye2dafe82014-06-25 20:41:37 -05001489 }
1490
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001491 /* Block or return if destination link is congested */
Jon Maloya80ae532017-10-13 11:04:22 +02001492 rc = tipc_wait_for_cond(sock, &timeout,
1493 !tipc_dest_find(clinks, dnode, 0));
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001494 if (unlikely(rc))
Ying Xue39a0295f2015-03-02 15:37:47 +08001495 return rc;
Jon Paul Maloye2dafe82014-06-25 20:41:37 -05001496
Jon Maloyb6f88d92020-11-25 13:29:15 -05001497 if (dest->addrtype == TIPC_SERVICE_ADDR) {
Tuong Lienabc9b4e2019-12-10 15:21:04 +07001498 msg_set_type(hdr, TIPC_NAMED_MSG);
1499 msg_set_hdr_sz(hdr, NAMED_H_SIZE);
1500 msg_set_nametype(hdr, type);
1501 msg_set_nameinst(hdr, inst);
1502 msg_set_lookup_scope(hdr, tipc_node2scope(dnode));
1503 msg_set_destnode(hdr, dnode);
1504 msg_set_destport(hdr, dport);
Jon Maloyb6f88d92020-11-25 13:29:15 -05001505 } else { /* TIPC_SOCKET_ADDR */
Tuong Lienabc9b4e2019-12-10 15:21:04 +07001506 msg_set_type(hdr, TIPC_DIRECT_MSG);
1507 msg_set_lookup_scope(hdr, 0);
1508 msg_set_destnode(hdr, dnode);
1509 msg_set_destport(hdr, dest->addr.id.ref);
1510 msg_set_hdr_sz(hdr, BASIC_H_SIZE);
1511 }
1512
Jon Maloye654f9f2019-08-15 16:42:50 +02001513 __skb_queue_head_init(&pkts);
Hoang Le8b1e5b0a2020-03-26 09:50:29 +07001514 mtu = tipc_node_get_mtu(net, dnode, tsk->portid, true);
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001515 rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);
1516 if (unlikely(rc != dlen))
1517 return rc;
Tung Nguyen2fe97a52019-11-28 10:10:05 +07001518 if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) {
1519 __skb_queue_purge(&pkts);
Tung Nguyen67879272018-09-28 20:23:22 +02001520 return -ENOMEM;
Tung Nguyen2fe97a52019-11-28 10:10:05 +07001521 }
Jon Paul Maloye2dafe82014-06-25 20:41:37 -05001522
Tuong Lien01e661e2018-12-19 09:17:58 +07001523 trace_tipc_sk_sendmsg(sk, skb_peek(&pkts), TIPC_DUMP_SK_SNDQ, " ");
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001524 rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);
1525 if (unlikely(rc == -ELINKCONG)) {
Jon Maloya80ae532017-10-13 11:04:22 +02001526 tipc_dest_push(clinks, dnode, 0);
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001527 tsk->cong_link_cnt++;
1528 rc = 0;
1529 }
1530
1531 if (unlikely(syn && !rc))
1532 tipc_set_sk_state(sk, TIPC_CONNECTING);
1533
1534 return rc ? rc : dlen;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001535}
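/*
 * Illustrative user-space sketch (not kernel code) of the connectionless
 * path above. Service type 18888 and instance 17 are made-up values and
 * error handling is omitted; the send fails with EHOSTUNREACH if no
 * socket has bound that service address, as per the lookup above.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *	#include <string.h>
 *
 *	int sd = socket(AF_TIPC, SOCK_RDM, 0);
 *	struct sockaddr_tipc dst;
 *
 *	memset(&dst, 0, sizeof(dst));
 *	dst.family = AF_TIPC;
 *	dst.addrtype = TIPC_SERVICE_ADDR;
 *	dst.addr.name.name.type = 18888;
 *	dst.addr.name.name.instance = 17;
 *	dst.addr.name.domain = 0;
 *	sendto(sd, "hello", 5, 0, (struct sockaddr *)&dst, sizeof(dst));
 */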
1536
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001537/**
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001538 * tipc_sendstream - send stream-oriented data
Per Lidenb97bf3f2006-01-02 19:04:38 +01001539 * @sock: socket structure
1540 * @m: data to send
Jon Paul Maloy4ccfe5e2014-06-25 20:41:38 -05001541 * @dsz: total length of data to be transmitted
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001542 *
Per Lidenb97bf3f2006-01-02 19:04:38 +01001543 * Used for SOCK_STREAM data.
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001544 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08001545 * Return: the number of bytes sent on success (or partial success),
Allan Stephens1303e8f2006-06-25 23:46:50 -07001546 * or errno if no data sent
Per Lidenb97bf3f2006-01-02 19:04:38 +01001547 */
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001548static int tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dsz)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001549{
Allan Stephens0c3141e2008-04-15 00:22:02 -07001550 struct sock *sk = sock->sk;
Ying Xue39a0295f2015-03-02 15:37:47 +08001551 int ret;
1552
1553 lock_sock(sk);
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001554 ret = __tipc_sendstream(sock, m, dsz);
Ying Xue39a0295f2015-03-02 15:37:47 +08001555 release_sock(sk);
1556
1557 return ret;
1558}
1559
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001560static int __tipc_sendstream(struct socket *sock, struct msghdr *m, size_t dlen)
Ying Xue39a0295f2015-03-02 15:37:47 +08001561{
1562 struct sock *sk = sock->sk;
Jon Paul Maloy4ccfe5e2014-06-25 20:41:38 -05001563 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001564 long timeout = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
Jon Maloyc0bceb92019-10-30 14:00:41 +01001565 struct sk_buff_head *txq = &sk->sk_write_queue;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001566 struct tipc_sock *tsk = tipc_sk(sk);
1567 struct tipc_msg *hdr = &tsk->phdr;
1568 struct net *net = sock_net(sk);
Tuong Lien0a3e0602020-05-26 16:38:38 +07001569 struct sk_buff *skb;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001570 u32 dnode = tsk_peer_node(tsk);
Jon Maloyc0bceb92019-10-30 14:00:41 +01001571 int maxnagle = tsk->maxnagle;
1572 int maxpkt = tsk->max_pkt;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001573 int send, sent = 0;
Jon Maloyc0bceb92019-10-30 14:00:41 +01001574 int blocks, rc = 0;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001575
1576 if (unlikely(dlen > INT_MAX))
Jon Paul Maloy4ccfe5e2014-06-25 20:41:38 -05001577 return -EMSGSIZE;
Allan Stephens0c3141e2008-04-15 00:22:02 -07001578
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001579 /* Handle implicit connection setup */
1580 if (unlikely(dest)) {
1581 rc = __tipc_sendmsg(sock, m, dlen);
Parthasarathy Bhuvaragan92ef12b2018-09-25 18:21:58 +02001582 if (dlen && dlen == rc) {
1583 tsk->peer_caps = tipc_node_get_capabilities(net, dnode);
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001584 tsk->snt_unacked = tsk_inc(tsk, dlen + msg_hdr_sz(hdr));
Parthasarathy Bhuvaragan92ef12b2018-09-25 18:21:58 +02001585 }
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001586 return rc;
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001587 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001588
Jon Paul Maloy4ccfe5e2014-06-25 20:41:38 -05001589 do {
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001590 rc = tipc_wait_for_cond(sock, &timeout,
1591 (!tsk->cong_link_cnt &&
Jon Paul Maloy8c44e1a2017-01-03 10:55:09 -05001592 !tsk_conn_cong(tsk) &&
1593 tipc_sk_connected(sk)));
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001594 if (unlikely(rc))
1595 break;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001596 send = min_t(size_t, dlen - sent, TIPC_MAX_USER_MSG_SIZE);
Jon Maloyc0bceb92019-10-30 14:00:41 +01001597 blocks = tsk->snd_backlog;
Tuong Lienc9aa81f2020-06-11 17:07:35 +07001598 if (tsk->oneway++ >= tsk->nagle_start && maxnagle &&
1599 send <= maxnagle) {
Jon Maloyc0bceb92019-10-30 14:00:41 +01001600 rc = tipc_msg_append(hdr, m, send, maxnagle, txq);
1601 if (unlikely(rc < 0))
1602 break;
1603 blocks += rc;
Tuong Lien0a3e0602020-05-26 16:38:38 +07001604 tsk->msg_acc++;
Jon Maloyc0bceb92019-10-30 14:00:41 +01001605 if (blocks <= 64 && tsk->expect_ack) {
1606 tsk->snd_backlog = blocks;
1607 sent += send;
1608 break;
Tuong Lien0a3e0602020-05-26 16:38:38 +07001609 } else if (blocks > 64) {
1610 tsk->pkt_cnt += skb_queue_len(txq);
1611 } else {
1612 skb = skb_peek_tail(txq);
YueHaibing4c21daa2020-05-28 22:34:07 +08001613 if (skb) {
1614 msg_set_ack_required(buf_msg(skb));
1615 tsk->expect_ack = true;
1616 } else {
1617 tsk->expect_ack = false;
1618 }
Tuong Lien0a3e0602020-05-26 16:38:38 +07001619 tsk->msg_acc = 0;
1620 tsk->pkt_cnt = 0;
Jon Maloyc0bceb92019-10-30 14:00:41 +01001621 }
Jon Maloyc0bceb92019-10-30 14:00:41 +01001622 } else {
1623 rc = tipc_msg_build(hdr, m, sent, send, maxpkt, txq);
1624 if (unlikely(rc != send))
1625 break;
1626 blocks += tsk_inc(tsk, send + MIN_H_SIZE);
1627 }
1628 trace_tipc_sk_sendstream(sk, skb_peek(txq),
Tuong Lien01e661e2018-12-19 09:17:58 +07001629 TIPC_DUMP_SK_SNDQ, " ");
Jon Maloyc0bceb92019-10-30 14:00:41 +01001630 rc = tipc_node_xmit(net, txq, dnode, tsk->portid);
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001631 if (unlikely(rc == -ELINKCONG)) {
1632 tsk->cong_link_cnt = 1;
1633 rc = 0;
1634 }
1635 if (likely(!rc)) {
Jon Maloyc0bceb92019-10-30 14:00:41 +01001636 tsk->snt_unacked += blocks;
1637 tsk->snd_backlog = 0;
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001638 sent += send;
1639 }
1640 } while (sent < dlen && !rc);
1641
Parthasarathy Bhuvaragan3364d612017-04-24 15:00:42 +02001642 return sent ? sent : rc;
Jon Paul Maloy4ccfe5e2014-06-25 20:41:38 -05001643}
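/*
 * Illustrative user-space sketch (not kernel code) of the stream path
 * above, with made-up service values and no error handling. The
 * connection may also be set up implicitly by passing the destination
 * address to the first sendmsg() on the socket, as handled at the top
 * of __tipc_sendstream().
 *
 *	int sd = socket(AF_TIPC, SOCK_STREAM, 0);
 *	struct sockaddr_tipc srv;
 *	const char data[] = "stream data";
 *
 *	memset(&srv, 0, sizeof(srv));
 *	srv.family = AF_TIPC;
 *	srv.addrtype = TIPC_SERVICE_ADDR;
 *	srv.addr.name.name.type = 18888;
 *	srv.addr.name.name.instance = 17;
 *	connect(sd, (struct sockaddr *)&srv, sizeof(srv));
 *	send(sd, data, sizeof(data) - 1, 0);
 */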
1644
1645/**
1646 * tipc_send_packet - send a connection-oriented message
Jon Paul Maloy4ccfe5e2014-06-25 20:41:38 -05001647 * @sock: socket structure
1648 * @m: message to send
1649 * @dsz: length of data to be transmitted
1650 *
1651 * Used for SOCK_SEQPACKET messages.
1652 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08001653 * Return: the number of bytes sent on success, or errno otherwise
Jon Paul Maloy4ccfe5e2014-06-25 20:41:38 -05001654 */
Ying Xue1b784142015-03-02 15:37:48 +08001655static int tipc_send_packet(struct socket *sock, struct msghdr *m, size_t dsz)
Jon Paul Maloy4ccfe5e2014-06-25 20:41:38 -05001656{
1657 if (dsz > TIPC_MAX_USER_MSG_SIZE)
1658 return -EMSGSIZE;
1659
Jon Paul Maloy365ad352017-01-03 10:55:11 -05001660 return tipc_sendstream(sock, m, dsz);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001661}
1662
Jon Paul Maloydadebc02014-08-22 18:09:11 -04001663/* tipc_sk_finish_conn - complete the setup of a connection
Per Lidenb97bf3f2006-01-02 19:04:38 +01001664 */
Jon Paul Maloy301bae52014-08-22 18:09:20 -04001665static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
Jon Paul Maloydadebc02014-08-22 18:09:11 -04001666 u32 peer_node)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001667{
Ying Xue3721e9c2015-01-13 17:07:48 +08001668 struct sock *sk = &tsk->sk;
1669 struct net *net = sock_net(sk);
Jon Paul Maloy301bae52014-08-22 18:09:20 -04001670 struct tipc_msg *msg = &tsk->phdr;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001671
Jon Maloy25b92212018-09-28 20:23:21 +02001672 msg_set_syn(msg, 0);
Jon Paul Maloydadebc02014-08-22 18:09:11 -04001673 msg_set_destnode(msg, peer_node);
1674 msg_set_destport(msg, peer_port);
1675 msg_set_type(msg, TIPC_CONN_MSG);
1676 msg_set_lookup_scope(msg, 0);
1677 msg_set_hdr_sz(msg, SHORT_H_SIZE);
Jon Paul Maloyf9fef182014-03-12 11:31:08 -04001678
Jon Maloy0d5fcebf2017-10-20 11:21:32 +02001679 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
Parthasarathy Bhuvaragan8ea642e2016-11-01 14:02:44 +01001680 tipc_set_sk_state(sk, TIPC_ESTABLISHED);
Ying Xuef2f98002015-01-09 15:27:05 +08001681 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
Hoang Lef73b1282019-10-29 07:51:21 +07001682 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid, true);
Jon Paul Maloy60020e12016-05-02 11:58:46 -04001683 tsk->peer_caps = tipc_node_get_capabilities(net, peer_node);
Jon Maloyc0bceb92019-10-30 14:00:41 +01001684 tsk_set_nagle(tsk);
Tung Nguyen67879272018-09-28 20:23:22 +02001685 __skb_queue_purge(&sk->sk_write_queue);
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04001686 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL)
1687 return;
1688
1689 /* Fall back to message based flow control */
1690 tsk->rcv_win = FLOWCTL_MSG_WIN;
1691 tsk->snd_win = FLOWCTL_MSG_WIN;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001692}
1693
1694/**
Jon Maloy31c82a22017-10-13 11:04:24 +02001695 * tipc_sk_set_orig_addr - capture sender's address for received message
Per Lidenb97bf3f2006-01-02 19:04:38 +01001696 * @m: descriptor for message info
Andrew Lunnd8141202020-07-13 01:15:14 +02001697 * @skb: received message
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001698 *
Per Lidenb97bf3f2006-01-02 19:04:38 +01001699 * Note: Address is not captured if not requested by receiver.
1700 */
Jon Maloy31c82a22017-10-13 11:04:24 +02001701static void tipc_sk_set_orig_addr(struct msghdr *m, struct sk_buff *skb)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001702{
Jon Maloy31c82a22017-10-13 11:04:24 +02001703 DECLARE_SOCKADDR(struct sockaddr_pair *, srcaddr, m->msg_name);
1704 struct tipc_msg *hdr = buf_msg(skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001705
Jon Maloy31c82a22017-10-13 11:04:24 +02001706 if (!srcaddr)
1707 return;
1708
1709 srcaddr->sock.family = AF_TIPC;
Jon Maloyb6f88d92020-11-25 13:29:15 -05001710 srcaddr->sock.addrtype = TIPC_SOCKET_ADDR;
Eric Dumazet09c8b972018-05-09 09:50:22 -07001711 srcaddr->sock.scope = 0;
Jon Maloy31c82a22017-10-13 11:04:24 +02001712 srcaddr->sock.addr.id.ref = msg_origport(hdr);
1713 srcaddr->sock.addr.id.node = msg_orignode(hdr);
1714 srcaddr->sock.addr.name.domain = 0;
Jon Maloy31c82a22017-10-13 11:04:24 +02001715 m->msg_namelen = sizeof(struct sockaddr_tipc);
1716
1717 if (!msg_in_group(hdr))
1718 return;
1719
1720 /* Group message users may also want to know sending member's id */
1721 srcaddr->member.family = AF_TIPC;
Jon Maloyb6f88d92020-11-25 13:29:15 -05001722 srcaddr->member.addrtype = TIPC_SERVICE_ADDR;
Eric Dumazet09c8b972018-05-09 09:50:22 -07001723 srcaddr->member.scope = 0;
Jon Maloy31c82a22017-10-13 11:04:24 +02001724 srcaddr->member.addr.name.name.type = msg_nametype(hdr);
1725 srcaddr->member.addr.name.name.instance = TIPC_SKB_CB(skb)->orig_member;
1726 srcaddr->member.addr.name.domain = 0;
1727 m->msg_namelen = sizeof(*srcaddr);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001728}
1729
1730/**
Jon Paul Maloy301bae52014-08-22 18:09:20 -04001731 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
Per Lidenb97bf3f2006-01-02 19:04:38 +01001732 * @m: descriptor for message info
Jon Maloy1c1274a2018-11-17 12:17:06 -05001733 * @skb: received message buffer
Jon Paul Maloy301bae52014-08-22 18:09:20 -04001734 * @tsk: TIPC port associated with message
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001735 *
Per Lidenb97bf3f2006-01-02 19:04:38 +01001736 * Note: Ancillary data is not captured if not requested by receiver.
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001737 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08001738 * Return: 0 if successful, otherwise errno
Per Lidenb97bf3f2006-01-02 19:04:38 +01001739 */
Jon Maloy1c1274a2018-11-17 12:17:06 -05001740static int tipc_sk_anc_data_recv(struct msghdr *m, struct sk_buff *skb,
Jon Paul Maloy301bae52014-08-22 18:09:20 -04001741 struct tipc_sock *tsk)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001742{
Jon Maloy1c1274a2018-11-17 12:17:06 -05001743 struct tipc_msg *msg;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001744 u32 anc_data[3];
1745 u32 err;
1746 u32 dest_type;
Allan Stephens3546c752006-06-25 23:45:24 -07001747 int has_name;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001748 int res;
1749
1750 if (likely(m->msg_controllen == 0))
1751 return 0;
Jon Maloy1c1274a2018-11-17 12:17:06 -05001752 msg = buf_msg(skb);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001753
1754 /* Optionally capture errored message object(s) */
Per Lidenb97bf3f2006-01-02 19:04:38 +01001755 err = msg ? msg_errcode(msg) : 0;
1756 if (unlikely(err)) {
1757 anc_data[0] = err;
1758 anc_data[1] = msg_data_sz(msg);
Allan Stephens2db99832010-12-31 18:59:33 +00001759 res = put_cmsg(m, SOL_TIPC, TIPC_ERRINFO, 8, anc_data);
1760 if (res)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001761 return res;
Allan Stephens2db99832010-12-31 18:59:33 +00001762 if (anc_data[1]) {
Jon Maloy1c1274a2018-11-17 12:17:06 -05001763 if (skb_linearize(skb))
1764 return -ENOMEM;
1765 msg = buf_msg(skb);
Allan Stephens2db99832010-12-31 18:59:33 +00001766 res = put_cmsg(m, SOL_TIPC, TIPC_RETDATA, anc_data[1],
1767 msg_data(msg));
1768 if (res)
1769 return res;
1770 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001771 }
1772
1773 /* Optionally capture message destination object */
Per Lidenb97bf3f2006-01-02 19:04:38 +01001774 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
1775 switch (dest_type) {
1776 case TIPC_NAMED_MSG:
Allan Stephens3546c752006-06-25 23:45:24 -07001777 has_name = 1;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001778 anc_data[0] = msg_nametype(msg);
1779 anc_data[1] = msg_namelower(msg);
1780 anc_data[2] = msg_namelower(msg);
1781 break;
1782 case TIPC_MCAST_MSG:
Allan Stephens3546c752006-06-25 23:45:24 -07001783 has_name = 1;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001784 anc_data[0] = msg_nametype(msg);
1785 anc_data[1] = msg_namelower(msg);
1786 anc_data[2] = msg_nameupper(msg);
1787 break;
1788 case TIPC_CONN_MSG:
Jon Paul Maloy301bae52014-08-22 18:09:20 -04001789 has_name = (tsk->conn_type != 0);
1790 anc_data[0] = tsk->conn_type;
1791 anc_data[1] = tsk->conn_instance;
1792 anc_data[2] = tsk->conn_instance;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001793 break;
1794 default:
Allan Stephens3546c752006-06-25 23:45:24 -07001795 has_name = 0;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001796 }
Allan Stephens2db99832010-12-31 18:59:33 +00001797 if (has_name) {
1798 res = put_cmsg(m, SOL_TIPC, TIPC_DESTNAME, 12, anc_data);
1799 if (res)
1800 return res;
1801 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01001802
1803 return 0;
1804}
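/*
 * Illustrative user-space sketch (not kernel code) of consuming the
 * ancillary data assembled above, on an already created TIPC socket sd;
 * buffer sizes are arbitrary and error handling is omitted. TIPC_ERRINFO
 * carries two u32s (error code and length of returned data), TIPC_RETDATA
 * the returned payload itself, and TIPC_DESTNAME three u32s describing
 * the destination name.
 *
 *	#include <sys/socket.h>
 *	#include <linux/tipc.h>
 *	#include <string.h>
 *
 *	char buf[1024], anc[256];
 *	__u32 errinfo[2], destname[3];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr msg = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = anc, .msg_controllen = sizeof(anc),
 *	};
 *	struct cmsghdr *cm;
 *
 *	recvmsg(sd, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level != SOL_TIPC)
 *			continue;
 *		if (cm->cmsg_type == TIPC_ERRINFO)
 *			memcpy(errinfo, CMSG_DATA(cm), sizeof(errinfo));
 *		else if (cm->cmsg_type == TIPC_DESTNAME)
 *			memcpy(destname, CMSG_DATA(cm), sizeof(destname));
 *	}
 */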
1805
Tuong Lienc7268582020-05-13 19:33:16 +07001806static struct sk_buff *tipc_sk_build_ack(struct tipc_sock *tsk)
Jon Paul Maloy739f5e42014-08-22 18:09:12 -04001807{
Parthasarathy Bhuvaragand6fb7e92016-11-01 14:02:40 +01001808 struct sock *sk = &tsk->sk;
Ying Xuea6ca1092014-11-26 11:41:55 +08001809 struct sk_buff *skb = NULL;
Jon Paul Maloy739f5e42014-08-22 18:09:12 -04001810 struct tipc_msg *msg;
Jon Paul Maloy301bae52014-08-22 18:09:20 -04001811 u32 peer_port = tsk_peer_port(tsk);
1812 u32 dnode = tsk_peer_node(tsk);
Jon Paul Maloy739f5e42014-08-22 18:09:12 -04001813
Parthasarathy Bhuvaragand6fb7e92016-11-01 14:02:40 +01001814 if (!tipc_sk_connected(sk))
Tuong Lienc7268582020-05-13 19:33:16 +07001815 return NULL;
Jon Paul Maloyc5898632015-02-05 08:36:36 -05001816 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1817 dnode, tsk_own_node(tsk), peer_port,
1818 tsk->portid, TIPC_OK);
Ying Xuea6ca1092014-11-26 11:41:55 +08001819 if (!skb)
Tuong Lienc7268582020-05-13 19:33:16 +07001820 return NULL;
Ying Xuea6ca1092014-11-26 11:41:55 +08001821 msg = buf_msg(skb);
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04001822 msg_set_conn_ack(msg, tsk->rcv_unacked);
1823 tsk->rcv_unacked = 0;
1824
 1825 /* Adjust to and advertise the correct window limit */
1826 if (tsk->peer_caps & TIPC_BLOCK_FLOWCTL) {
1827 tsk->rcv_win = tsk_adv_blocks(tsk->sk.sk_rcvbuf);
1828 msg_set_adv_win(msg, tsk->rcv_win);
1829 }
Tuong Lienc7268582020-05-13 19:33:16 +07001830 return skb;
1831}
1832
1833static void tipc_sk_send_ack(struct tipc_sock *tsk)
1834{
1835 struct sk_buff *skb;
1836
1837 skb = tipc_sk_build_ack(tsk);
1838 if (!skb)
1839 return;
1840
1841 tipc_node_xmit_skb(sock_net(&tsk->sk), skb, tsk_peer_node(tsk),
1842 msg_link_selector(buf_msg(skb)));
Jon Paul Maloy739f5e42014-08-22 18:09:12 -04001843}
1844
Arnaldo Carvalho de Melo85d3fc92014-05-23 15:55:12 -04001845static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
Ying Xue9bbb4ec2014-01-17 09:50:07 +08001846{
1847 struct sock *sk = sock->sk;
Tung Nguyen48766a52019-02-19 11:20:48 +07001848 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Arnaldo Carvalho de Melo85d3fc92014-05-23 15:55:12 -04001849 long timeo = *timeop;
Parthasarathy Bhuvaragan4e0df492017-04-26 10:05:01 +02001850 int err = sock_error(sk);
1851
1852 if (err)
1853 return err;
Ying Xue9bbb4ec2014-01-17 09:50:07 +08001854
1855 for (;;) {
Ying Xuefe8e4642014-03-06 14:40:18 +01001856 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
Parthasarathy Bhuvaragan6f000892016-11-01 14:02:47 +01001857 if (sk->sk_shutdown & RCV_SHUTDOWN) {
Ying Xue9bbb4ec2014-01-17 09:50:07 +08001858 err = -ENOTCONN;
1859 break;
1860 }
Tung Nguyen48766a52019-02-19 11:20:48 +07001861 add_wait_queue(sk_sleep(sk), &wait);
Ying Xue9bbb4ec2014-01-17 09:50:07 +08001862 release_sock(sk);
Tung Nguyen48766a52019-02-19 11:20:48 +07001863 timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo);
1864 sched_annotate_sleep();
Ying Xue9bbb4ec2014-01-17 09:50:07 +08001865 lock_sock(sk);
Tung Nguyen48766a52019-02-19 11:20:48 +07001866 remove_wait_queue(sk_sleep(sk), &wait);
Ying Xue9bbb4ec2014-01-17 09:50:07 +08001867 }
1868 err = 0;
1869 if (!skb_queue_empty(&sk->sk_receive_queue))
1870 break;
Ying Xue9bbb4ec2014-01-17 09:50:07 +08001871 err = -EAGAIN;
1872 if (!timeo)
1873 break;
Erik Hugne143fe222015-03-09 10:43:42 +01001874 err = sock_intr_errno(timeo);
1875 if (signal_pending(current))
1876 break;
Parthasarathy Bhuvaragan4e0df492017-04-26 10:05:01 +02001877
1878 err = sock_error(sk);
1879 if (err)
1880 break;
Ying Xue9bbb4ec2014-01-17 09:50:07 +08001881 }
Arnaldo Carvalho de Melo85d3fc92014-05-23 15:55:12 -04001882 *timeop = timeo;
Ying Xue9bbb4ec2014-01-17 09:50:07 +08001883 return err;
1884}
1885
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001886/**
Ying Xue247f0f32014-02-18 16:06:46 +08001887 * tipc_recvmsg - receive packet-oriented message
Randy Dunlapf172f4b2020-11-29 10:32:49 -08001888 * @sock: network socket
Per Lidenb97bf3f2006-01-02 19:04:38 +01001889 * @m: descriptor for message info
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001890 * @buflen: length of user buffer area
Per Lidenb97bf3f2006-01-02 19:04:38 +01001891 * @flags: receive flags
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001892 *
Per Lidenb97bf3f2006-01-02 19:04:38 +01001893 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
1894 * If the complete message doesn't fit in user area, truncate it.
1895 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08001896 * Return: size of returned message data, errno otherwise
Per Lidenb97bf3f2006-01-02 19:04:38 +01001897 */
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001898static int tipc_recvmsg(struct socket *sock, struct msghdr *m,
1899 size_t buflen, int flags)
Per Lidenb97bf3f2006-01-02 19:04:38 +01001900{
Allan Stephens0c3141e2008-04-15 00:22:02 -07001901 struct sock *sk = sock->sk;
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001902 bool connected = !tipc_sk_type_connectionless(sk);
Jon Maloyae236fb2017-10-13 11:04:25 +02001903 struct tipc_sock *tsk = tipc_sk(sk);
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001904 int rc, err, hlen, dlen, copy;
Jon Maloyb7d42632017-10-13 11:04:26 +02001905 struct sk_buff_head xmitq;
Jon Maloyae236fb2017-10-13 11:04:25 +02001906 struct tipc_msg *hdr;
1907 struct sk_buff *skb;
1908 bool grp_evt;
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001909 long timeout;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001910
Allan Stephens0c3141e2008-04-15 00:22:02 -07001911 /* Catch invalid receive requests */
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001912 if (unlikely(!buflen))
Per Lidenb97bf3f2006-01-02 19:04:38 +01001913 return -EINVAL;
1914
Allan Stephens0c3141e2008-04-15 00:22:02 -07001915 lock_sock(sk);
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001916 if (unlikely(connected && sk->sk_state == TIPC_OPEN)) {
1917 rc = -ENOTCONN;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001918 goto exit;
1919 }
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001920 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001921
Jon Maloyb7d42632017-10-13 11:04:26 +02001922 /* Step rcv queue to first msg with data or error; wait if necessary */
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001923 do {
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001924 rc = tipc_wait_for_rcvmsg(sock, &timeout);
1925 if (unlikely(rc))
Per Lidenb97bf3f2006-01-02 19:04:38 +01001926 goto exit;
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001927 skb = skb_peek(&sk->sk_receive_queue);
1928 hdr = buf_msg(skb);
1929 dlen = msg_data_sz(hdr);
1930 hlen = msg_hdr_sz(hdr);
1931 err = msg_errcode(hdr);
Jon Maloyae236fb2017-10-13 11:04:25 +02001932 grp_evt = msg_is_grp_evt(hdr);
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001933 if (likely(dlen || err))
1934 break;
1935 tsk_advance_rx_queue(sk);
1936 } while (1);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001937
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001938 /* Collect msg meta data, including error code and rejected data */
Jon Maloy31c82a22017-10-13 11:04:24 +02001939 tipc_sk_set_orig_addr(m, skb);
Jon Maloy1c1274a2018-11-17 12:17:06 -05001940 rc = tipc_sk_anc_data_recv(m, skb, tsk);
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001941 if (unlikely(rc))
1942 goto exit;
Jon Maloy1c1274a2018-11-17 12:17:06 -05001943 hdr = buf_msg(skb);
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001944
1945 /* Capture data if non-error msg, otherwise just set return value */
1946 if (likely(!err)) {
1947 copy = min_t(int, dlen, buflen);
1948 if (unlikely(copy != dlen))
1949 m->msg_flags |= MSG_TRUNC;
1950 rc = skb_copy_datagram_msg(skb, hlen, m, copy);
1951 } else {
1952 copy = 0;
1953 rc = 0;
1954 if (err != TIPC_CONN_SHUTDOWN && connected && !m->msg_control)
1955 rc = -ECONNRESET;
1956 }
1957 if (unlikely(rc))
1958 goto exit;
1959
Jon Maloyae236fb2017-10-13 11:04:25 +02001960 /* Mark message as group event if applicable */
1961 if (unlikely(grp_evt)) {
1962 if (msg_grp_evt(hdr) == TIPC_WITHDRAWN)
1963 m->msg_flags |= MSG_EOR;
1964 m->msg_flags |= MSG_OOB;
1965 copy = 0;
1966 }
1967
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001968 /* Capture of data or error code/rejected data was successful */
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04001969 if (unlikely(flags & MSG_PEEK))
1970 goto exit;
1971
Jon Maloyb7d42632017-10-13 11:04:26 +02001972 /* Send group flow control advertisement when applicable */
1973 if (tsk->group && msg_in_group(hdr) && !grp_evt) {
Jon Maloye654f9f2019-08-15 16:42:50 +02001974 __skb_queue_head_init(&xmitq);
Jon Maloyb7d42632017-10-13 11:04:26 +02001975 tipc_group_update_rcv_win(tsk->group, tsk_blocks(hlen + dlen),
1976 msg_orignode(hdr), msg_origport(hdr),
1977 &xmitq);
1978 tipc_node_distr_xmit(sock_net(sk), &xmitq);
1979 }
1980
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04001981 tsk_advance_rx_queue(sk);
Jon Maloyae236fb2017-10-13 11:04:25 +02001982
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001983 if (likely(!connected))
1984 goto exit;
1985
Jon Maloyb7d42632017-10-13 11:04:26 +02001986 /* Send connection flow control advertisement when applicable */
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001987 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
1988 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
1989 tipc_sk_send_ack(tsk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01001990exit:
Allan Stephens0c3141e2008-04-15 00:22:02 -07001991 release_sock(sk);
Jon Paul Maloye9f8b102017-05-02 18:16:53 +02001992 return rc ? rc : copy;
Per Lidenb97bf3f2006-01-02 19:04:38 +01001993}
1994
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09001995/**
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02001996 * tipc_recvstream - receive stream-oriented data
Randy Dunlapf172f4b2020-11-29 10:32:49 -08001997 * @sock: network socket
Per Lidenb97bf3f2006-01-02 19:04:38 +01001998 * @m: descriptor for message info
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02001999 * @buflen: total size of user buffer area
Per Lidenb97bf3f2006-01-02 19:04:38 +01002000 * @flags: receive flags
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09002001 *
2002 * Used for SOCK_STREAM messages only. If not enough data is available
Per Lidenb97bf3f2006-01-02 19:04:38 +01002003 * it will optionally wait for more; never truncates data.
2004 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08002005 * Return: size of returned message data, errno otherwise
Per Lidenb97bf3f2006-01-02 19:04:38 +01002006 */
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002007static int tipc_recvstream(struct socket *sock, struct msghdr *m,
2008 size_t buflen, int flags)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002009{
Allan Stephens0c3141e2008-04-15 00:22:02 -07002010 struct sock *sk = sock->sk;
Jon Paul Maloy58ed9442014-03-12 11:31:12 -04002011 struct tipc_sock *tsk = tipc_sk(sk);
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002012 struct sk_buff *skb;
2013 struct tipc_msg *hdr;
2014 struct tipc_skb_cb *skb_cb;
2015 bool peek = flags & MSG_PEEK;
2016 int offset, required, copy, copied = 0;
2017 int hlen, dlen, err, rc;
2018 long timeout;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002019
2020 /* Catch invalid receive attempts */
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002021 if (unlikely(!buflen))
Per Lidenb97bf3f2006-01-02 19:04:38 +01002022 return -EINVAL;
2023
Allan Stephens0c3141e2008-04-15 00:22:02 -07002024 lock_sock(sk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002025
Parthasarathy Bhuvaragan438adca2016-11-01 14:02:45 +01002026 if (unlikely(sk->sk_state == TIPC_OPEN)) {
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002027 rc = -ENOTCONN;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002028 goto exit;
2029 }
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002030 required = sock_rcvlowat(sk, flags & MSG_WAITALL, buflen);
2031 timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002032
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002033 do {
2034 /* Look at first msg in receive queue; wait if necessary */
2035 rc = tipc_wait_for_rcvmsg(sock, &timeout);
2036 if (unlikely(rc))
2037 break;
2038 skb = skb_peek(&sk->sk_receive_queue);
2039 skb_cb = TIPC_SKB_CB(skb);
2040 hdr = buf_msg(skb);
2041 dlen = msg_data_sz(hdr);
2042 hlen = msg_hdr_sz(hdr);
2043 err = msg_errcode(hdr);
Paul Gortmaker617d3c72012-04-30 15:29:02 -04002044
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002045 /* Discard any empty non-errored (SYN-) message */
2046 if (unlikely(!dlen && !err)) {
2047 tsk_advance_rx_queue(sk);
2048 continue;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002049 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01002050
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002051 /* Collect msg meta data, incl. error code and rejected data */
2052 if (!copied) {
Jon Maloy31c82a22017-10-13 11:04:24 +02002053 tipc_sk_set_orig_addr(m, skb);
Jon Maloy1c1274a2018-11-17 12:17:06 -05002054 rc = tipc_sk_anc_data_recv(m, skb, tsk);
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002055 if (rc)
2056 break;
Jon Maloy1c1274a2018-11-17 12:17:06 -05002057 hdr = buf_msg(skb);
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002058 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01002059
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002060 /* Copy data if msg ok, otherwise return error/partial data */
2061 if (likely(!err)) {
2062 offset = skb_cb->bytes_read;
2063 copy = min_t(int, dlen - offset, buflen - copied);
2064 rc = skb_copy_datagram_msg(skb, hlen + offset, m, copy);
2065 if (unlikely(rc))
2066 break;
2067 copied += copy;
2068 offset += copy;
2069 if (unlikely(offset < dlen)) {
2070 if (!peek)
2071 skb_cb->bytes_read = offset;
2072 break;
2073 }
2074 } else {
2075 rc = 0;
2076 if ((err != TIPC_CONN_SHUTDOWN) && !m->msg_control)
2077 rc = -ECONNRESET;
2078 if (copied || rc)
2079 break;
2080 }
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04002081
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002082 if (unlikely(peek))
2083 break;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002084
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002085 tsk_advance_rx_queue(sk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002086
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002087 /* Send connection flow control advertisement when applicable */
2088 tsk->rcv_unacked += tsk_inc(tsk, hlen + dlen);
Tuong Lienc7268582020-05-13 19:33:16 +07002089 if (tsk->rcv_unacked >= tsk->rcv_win / TIPC_ACK_RATE)
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002090 tipc_sk_send_ack(tsk);
2091
2092 /* Exit if all requested data or FIN/error received */
2093 if (copied == buflen || err)
2094 break;
2095
2096 } while (!skb_queue_empty(&sk->sk_receive_queue) || copied < required);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002097exit:
Allan Stephens0c3141e2008-04-15 00:22:02 -07002098 release_sock(sk);
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02002099 return copied ? copied : rc;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002100}
2101
2102/**
Ying Xuef288bef2012-08-21 11:16:57 +08002103 * tipc_write_space - wake up thread if port congestion is released
2104 * @sk: socket
2105 */
2106static void tipc_write_space(struct sock *sk)
2107{
2108 struct socket_wq *wq;
2109
2110 rcu_read_lock();
2111 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002112 if (skwq_has_sleeper(wq))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002113 wake_up_interruptible_sync_poll(&wq->wait, EPOLLOUT |
2114 EPOLLWRNORM | EPOLLWRBAND);
Ying Xuef288bef2012-08-21 11:16:57 +08002115 rcu_read_unlock();
2116}
2117
2118/**
2119 * tipc_data_ready - wake up threads to indicate messages have been received
2120 * @sk: socket
Ying Xuef288bef2012-08-21 11:16:57 +08002121 */
David S. Miller676d2362014-04-11 16:15:36 -04002122static void tipc_data_ready(struct sock *sk)
Ying Xuef288bef2012-08-21 11:16:57 +08002123{
2124 struct socket_wq *wq;
2125
2126 rcu_read_lock();
2127 wq = rcu_dereference(sk->sk_wq);
Herbert Xu1ce0bf52015-11-26 13:55:39 +08002128 if (skwq_has_sleeper(wq))
Linus Torvaldsa9a08842018-02-11 14:34:03 -08002129 wake_up_interruptible_sync_poll(&wq->wait, EPOLLIN |
2130 EPOLLRDNORM | EPOLLRDBAND);
Ying Xuef288bef2012-08-21 11:16:57 +08002131 rcu_read_unlock();
2132}
2133
Ying Xuef4195d12015-11-22 15:46:05 +08002134static void tipc_sock_destruct(struct sock *sk)
2135{
2136 __skb_queue_purge(&sk->sk_receive_queue);
2137}
2138
Jon Maloy64ac5f52017-10-13 11:04:20 +02002139static void tipc_sk_proto_rcv(struct sock *sk,
2140 struct sk_buff_head *inputq,
2141 struct sk_buff_head *xmitq)
2142{
2143 struct sk_buff *skb = __skb_dequeue(inputq);
2144 struct tipc_sock *tsk = tipc_sk(sk);
2145 struct tipc_msg *hdr = buf_msg(skb);
Jon Maloy75da2162017-10-13 11:04:23 +02002146 struct tipc_group *grp = tsk->group;
Jon Maloyb7d42632017-10-13 11:04:26 +02002147 bool wakeup = false;
Jon Maloy64ac5f52017-10-13 11:04:20 +02002148
2149 switch (msg_user(hdr)) {
2150 case CONN_MANAGER:
Parthasarathy Bhuvaragane7eb0582018-10-10 17:50:23 +02002151 tipc_sk_conn_proto_rcv(tsk, skb, inputq, xmitq);
Jon Maloy64ac5f52017-10-13 11:04:20 +02002152 return;
2153 case SOCK_WAKEUP:
Jon Maloya80ae532017-10-13 11:04:22 +02002154 tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
Tung Nguyenbfd07f32019-02-25 10:57:20 +07002155 /* coupled with smp_rmb() in tipc_wait_for_cond() */
2156 smp_wmb();
Jon Maloy64ac5f52017-10-13 11:04:20 +02002157 tsk->cong_link_cnt--;
Jon Maloyb7d42632017-10-13 11:04:26 +02002158 wakeup = true;
Tuong Lien0a3e0602020-05-26 16:38:38 +07002159 tipc_sk_push_backlog(tsk, false);
Jon Maloy64ac5f52017-10-13 11:04:20 +02002160 break;
Jon Maloy75da2162017-10-13 11:04:23 +02002161 case GROUP_PROTOCOL:
Jon Maloyb7d42632017-10-13 11:04:26 +02002162 tipc_group_proto_rcv(grp, &wakeup, hdr, inputq, xmitq);
Jon Maloy75da2162017-10-13 11:04:23 +02002163 break;
Jon Maloy64ac5f52017-10-13 11:04:20 +02002164 case TOP_SRV:
Jon Maloyb7d42632017-10-13 11:04:26 +02002165 tipc_group_member_evt(tsk->group, &wakeup, &sk->sk_rcvbuf,
Jon Maloy7ad32bc2018-01-08 21:03:26 +01002166 hdr, inputq, xmitq);
Jon Maloy64ac5f52017-10-13 11:04:20 +02002167 break;
2168 default:
2169 break;
2170 }
2171
Jon Maloyb7d42632017-10-13 11:04:26 +02002172 if (wakeup)
2173 sk->sk_write_space(sk);
2174
Jon Maloy64ac5f52017-10-13 11:04:20 +02002175 kfree_skb(skb);
2176}
2177
Ying Xuef288bef2012-08-21 11:16:57 +08002178/**
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002179 * tipc_sk_filter_connect - check incoming message for a connection-based socket
Jon Paul Maloy58ed9442014-03-12 11:31:12 -04002180 * @tsk: TIPC socket
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002181 * @skb: pointer to message buffer.
Tuong Lienc7268582020-05-13 19:33:16 +07002182 * @xmitq: for Nagle ACK if any
Randy Dunlap637b77f2020-11-29 10:32:48 -08002183 * Return: true if message should be added to receive queue, false otherwise
Ying Xue7e6c1312012-11-29 18:39:14 -05002184 */
Tuong Lienc7268582020-05-13 19:33:16 +07002185static bool tipc_sk_filter_connect(struct tipc_sock *tsk, struct sk_buff *skb,
2186 struct sk_buff_head *xmitq)
Ying Xue7e6c1312012-11-29 18:39:14 -05002187{
Jon Paul Maloy58ed9442014-03-12 11:31:12 -04002188 struct sock *sk = &tsk->sk;
Ying Xuef2f98002015-01-09 15:27:05 +08002189 struct net *net = sock_net(sk);
Jon Paul Maloycda36962015-07-22 10:11:20 -04002190 struct tipc_msg *hdr = buf_msg(skb);
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002191 bool con_msg = msg_connected(hdr);
2192 u32 pport = tsk_peer_port(tsk);
2193 u32 pnode = tsk_peer_node(tsk);
2194 u32 oport = msg_origport(hdr);
2195 u32 onode = msg_orignode(hdr);
2196 int err = msg_errcode(hdr);
Tung Nguyen67879272018-09-28 20:23:22 +02002197 unsigned long delay;
Ying Xue7e6c1312012-11-29 18:39:14 -05002198
Jon Paul Maloycda36962015-07-22 10:11:20 -04002199 if (unlikely(msg_mcast(hdr)))
2200 return false;
Jon Maloyc0bceb92019-10-30 14:00:41 +01002201 tsk->oneway = 0;
Ying Xue7e6c1312012-11-29 18:39:14 -05002202
Parthasarathy Bhuvaragan99a20882016-11-01 14:02:48 +01002203 switch (sk->sk_state) {
2204 case TIPC_CONNECTING:
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002205 /* Setup ACK */
2206 if (likely(con_msg)) {
2207 if (err)
2208 break;
2209 tipc_sk_finish_conn(tsk, oport, onode);
2210 msg_set_importance(&tsk->phdr, msg_importance(hdr));
2211 /* ACK+ message with data is added to receive queue */
2212 if (msg_data_sz(hdr))
2213 return true;
2214		/* Empty ACK-: wake up sleeping connect() and drop it */
Parthasarathy Bhuvaraganff946832019-05-09 07:13:42 +02002215 sk->sk_state_change(sk);
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002216 msg_set_dest_droppable(hdr, 1);
2217 return false;
Parthasarathy Bhuvaragan4e0df492017-04-26 10:05:01 +02002218 }
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002219 /* Ignore connectionless message if not from listening socket */
2220 if (oport != pport || onode != pnode)
Parthasarathy Bhuvaraganf40acba2016-11-01 14:02:49 +01002221 return false;
2222
Tung Nguyen67879272018-09-28 20:23:22 +02002223 /* Rejected SYN */
2224 if (err != TIPC_ERR_OVERLOAD)
2225 break;
2226
2227 /* Prepare for new setup attempt if we have a SYN clone */
2228 if (skb_queue_empty(&sk->sk_write_queue))
2229 break;
2230 get_random_bytes(&delay, 2);
2231 delay %= (tsk->conn_timeout / 4);
2232 delay = msecs_to_jiffies(delay + 100);
2233 sk_reset_timer(sk, &sk->sk_timer, jiffies + delay);
2234 return false;
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002235 case TIPC_OPEN:
2236 case TIPC_DISCONNECTING:
2237 return false;
2238 case TIPC_LISTEN:
2239 /* Accept only SYN message */
Jon Maloy25b92212018-09-28 20:23:21 +02002240 if (!msg_is_syn(hdr) &&
2241 tipc_node_get_capabilities(net, onode) & TIPC_SYN_BIT)
2242 return false;
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002243 if (!con_msg && !err)
2244 return true;
2245 return false;
2246 case TIPC_ESTABLISHED:
Jon Maloyc0bceb92019-10-30 14:00:41 +01002247 if (!skb_queue_empty(&sk->sk_write_queue))
Tuong Lien0a3e0602020-05-26 16:38:38 +07002248 tipc_sk_push_backlog(tsk, false);
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002249 /* Accept only connection-based messages sent by peer */
Tuong Lienc7268582020-05-13 19:33:16 +07002250 if (likely(con_msg && !err && pport == oport &&
2251 pnode == onode)) {
2252 if (msg_ack_required(hdr)) {
2253 struct sk_buff *skb;
2254
2255 skb = tipc_sk_build_ack(tsk);
Tuong Lien0a3e0602020-05-26 16:38:38 +07002256 if (skb) {
2257 msg_set_nagle_ack(buf_msg(skb));
Tuong Lienc7268582020-05-13 19:33:16 +07002258 __skb_queue_tail(xmitq, skb);
Tuong Lien0a3e0602020-05-26 16:38:38 +07002259 }
Tuong Lienc7268582020-05-13 19:33:16 +07002260 }
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002261 return true;
Tuong Lienc7268582020-05-13 19:33:16 +07002262 }
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002263 if (!tsk_peer_msg(tsk, hdr))
2264 return false;
2265 if (!err)
2266 return true;
2267 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2268 tipc_node_remove_conn(net, pnode, tsk->portid);
2269 sk->sk_state_change(sk);
Parthasarathy Bhuvaraganf40acba2016-11-01 14:02:49 +01002270 return true;
Ying Xue7e6c1312012-11-29 18:39:14 -05002271 default:
Parthasarathy Bhuvaragan438adca2016-11-01 14:02:45 +01002272 pr_err("Unknown sk_state %u\n", sk->sk_state);
Ying Xue7e6c1312012-11-29 18:39:14 -05002273 }
Jon Maloy39fdc9c2018-09-28 20:23:20 +02002274 /* Abort connection setup attempt */
2275 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2276 sk->sk_err = ECONNREFUSED;
2277 sk->sk_state_change(sk);
2278 return true;
Ying Xue7e6c1312012-11-29 18:39:14 -05002279}
2280
2281/**
Ying Xueaba79f32013-01-20 23:30:09 +01002282 * rcvbuf_limit - get proper overload limit of socket receive queue
2283 * @sk: socket
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04002284 * @skb: message
Ying Xueaba79f32013-01-20 23:30:09 +01002285 *
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04002286 * For connection-oriented messages, irrespective of importance,
2287 * the default queue limit is 2 MB.
Ying Xueaba79f32013-01-20 23:30:09 +01002288 *
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04002289 * For connectionless messages, queue limits are based on message
2290 * importance as follows:
Ying Xueaba79f32013-01-20 23:30:09 +01002291 *
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04002292 * TIPC_LOW_IMPORTANCE (2 MB)
2293 * TIPC_MEDIUM_IMPORTANCE (4 MB)
2294 * TIPC_HIGH_IMPORTANCE (8 MB)
2295 * TIPC_CRITICAL_IMPORTANCE (16 MB)
Ying Xueaba79f32013-01-20 23:30:09 +01002296 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08002297 * Return: overload limit according to corresponding message importance
Ying Xueaba79f32013-01-20 23:30:09 +01002298 */
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04002299static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *skb)
Ying Xueaba79f32013-01-20 23:30:09 +01002300{
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04002301 struct tipc_sock *tsk = tipc_sk(sk);
2302 struct tipc_msg *hdr = buf_msg(skb);
Ying Xueaba79f32013-01-20 23:30:09 +01002303
Jon Maloyb7d42632017-10-13 11:04:26 +02002304 if (unlikely(msg_in_group(hdr)))
Eric Dumazet82657922019-10-09 15:21:13 -07002305 return READ_ONCE(sk->sk_rcvbuf);
Jon Maloyb7d42632017-10-13 11:04:26 +02002306
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04002307 if (unlikely(!msg_connected(hdr)))
Eric Dumazet82657922019-10-09 15:21:13 -07002308 return READ_ONCE(sk->sk_rcvbuf) << msg_importance(hdr);
wangweidong0cee6bb2013-12-12 09:36:39 +08002309
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04002310 if (likely(tsk->peer_caps & TIPC_BLOCK_FLOWCTL))
Eric Dumazet82657922019-10-09 15:21:13 -07002311 return READ_ONCE(sk->sk_rcvbuf);
Jon Paul Maloy10724cc2016-05-02 11:58:47 -04002312
2313 return FLOWCTL_MSG_LIM;
Ying Xueaba79f32013-01-20 23:30:09 +01002314}
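The table above boils down to a left shift of the socket's default receive buffer by the message importance. A minimal sketch of that arithmetic, assuming the roughly 2 MB sk_rcvbuf default TIPC uses and ignoring the TIPC_BLOCK_FLOWCTL case handled separately; the helper name is invented for the illustration.

#include <linux/tipc.h>	/* TIPC_LOW_IMPORTANCE .. TIPC_CRITICAL_IMPORTANCE */

/* Illustration only, not kernel code: 'rcvbuf' stands in for sk_rcvbuf. */
static unsigned int example_dgram_limit(unsigned int rcvbuf, int importance)
{
	/* LOW=0 -> 2 MB, MEDIUM=1 -> 4 MB, HIGH=2 -> 8 MB, CRITICAL=3 -> 16 MB */
	return rcvbuf << importance;
}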
2315
2316/**
Jon Maloy64ac5f52017-10-13 11:04:20 +02002317 * tipc_sk_filter_rcv - validate incoming message
Allan Stephens0c3141e2008-04-15 00:22:02 -07002318 * @sk: socket
Jon Paul Maloycda36962015-07-22 10:11:20 -04002319 * @skb: pointer to message.
Randy Dunlapf172f4b2020-11-29 10:32:49 -08002320 * @xmitq: output message area (FIXME)
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09002321 *
Allan Stephens0c3141e2008-04-15 00:22:02 -07002322 * Enqueues message on receive queue if acceptable; optionally handles
2323 * disconnect indication for a connected socket.
2324 *
Jon Paul Maloy1186adf2015-02-05 08:36:37 -05002325 * Called with socket lock already taken
Per Lidenb97bf3f2006-01-02 19:04:38 +01002326 */
Jon Maloy64ac5f52017-10-13 11:04:20 +02002327static void tipc_sk_filter_rcv(struct sock *sk, struct sk_buff *skb,
2328 struct sk_buff_head *xmitq)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002329{
Jon Maloy64ac5f52017-10-13 11:04:20 +02002330 bool sk_conn = !tipc_sk_type_connectionless(sk);
Jon Paul Maloy58ed9442014-03-12 11:31:12 -04002331 struct tipc_sock *tsk = tipc_sk(sk);
Jon Maloy75da2162017-10-13 11:04:23 +02002332 struct tipc_group *grp = tsk->group;
Jon Paul Maloycda36962015-07-22 10:11:20 -04002333 struct tipc_msg *hdr = buf_msg(skb);
Jon Maloy64ac5f52017-10-13 11:04:20 +02002334 struct net *net = sock_net(sk);
2335 struct sk_buff_head inputq;
Hoang Le77d5ad42019-03-21 17:25:17 +07002336 int mtyp = msg_type(hdr);
Jon Maloy64ac5f52017-10-13 11:04:20 +02002337 int limit, err = TIPC_OK;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002338
Tuong Lien01e661e2018-12-19 09:17:58 +07002339 trace_tipc_sk_filter_rcv(sk, skb, TIPC_DUMP_ALL, " ");
Parthasarathy Bhuvaraganba8aebe2016-11-01 14:02:37 +01002340 TIPC_SKB_CB(skb)->bytes_read = 0;
Jon Maloy64ac5f52017-10-13 11:04:20 +02002341 __skb_queue_head_init(&inputq);
2342 __skb_queue_tail(&inputq, skb);
Allan Stephens0c3141e2008-04-15 00:22:02 -07002343
Jon Maloy64ac5f52017-10-13 11:04:20 +02002344 if (unlikely(!msg_isdata(hdr)))
2345 tipc_sk_proto_rcv(sk, &inputq, xmitq);
Jon Paul Maloycda36962015-07-22 10:11:20 -04002346
Jon Maloy75da2162017-10-13 11:04:23 +02002347 if (unlikely(grp))
2348 tipc_group_filter_msg(grp, &inputq, xmitq);
2349
Hoang Le77d5ad42019-03-21 17:25:17 +07002350 if (unlikely(!grp) && mtyp == TIPC_MCAST_MSG)
Hoang Le08e046c2019-03-21 17:25:18 +07002351 tipc_mcast_filter_msg(net, &tsk->mc_method.deferredq, &inputq);
Hoang Lec55c8ed2019-03-19 18:49:50 +07002352
Jon Maloy64ac5f52017-10-13 11:04:20 +02002353 /* Validate and add to receive buffer if there is space */
2354 while ((skb = __skb_dequeue(&inputq))) {
2355 hdr = buf_msg(skb);
2356 limit = rcvbuf_limit(sk, skb);
Tuong Lienc7268582020-05-13 19:33:16 +07002357 if ((sk_conn && !tipc_sk_filter_connect(tsk, skb, xmitq)) ||
Jon Maloy75da2162017-10-13 11:04:23 +02002358 (!sk_conn && msg_connected(hdr)) ||
2359 (!grp && msg_in_group(hdr)))
Jon Maloy64ac5f52017-10-13 11:04:20 +02002360 err = TIPC_ERR_NO_PORT;
GhantaKrishnamurthy MohanKrishna872619d2018-03-21 14:37:45 +01002361 else if (sk_rmem_alloc_get(sk) + skb->truesize >= limit) {
Tuong Lien01e661e2018-12-19 09:17:58 +07002362 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL,
2363 "err_overload2!");
GhantaKrishnamurthy MohanKrishna872619d2018-03-21 14:37:45 +01002364 atomic_inc(&sk->sk_drops);
Jon Maloy64ac5f52017-10-13 11:04:20 +02002365 err = TIPC_ERR_OVERLOAD;
GhantaKrishnamurthy MohanKrishna872619d2018-03-21 14:37:45 +01002366 }
Jon Maloy64ac5f52017-10-13 11:04:20 +02002367
2368 if (unlikely(err)) {
Tuong Lien01e661e2018-12-19 09:17:58 +07002369 if (tipc_msg_reverse(tipc_own_addr(net), &skb, err)) {
2370 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_NONE,
2371 "@filter_rcv!");
2372 __skb_queue_tail(xmitq, skb);
2373 }
Jon Maloy64ac5f52017-10-13 11:04:20 +02002374 err = TIPC_OK;
2375 continue;
2376 }
2377 __skb_queue_tail(&sk->sk_receive_queue, skb);
2378 skb_set_owner_r(skb, sk);
Tuong Lien01e661e2018-12-19 09:17:58 +07002379 trace_tipc_sk_overlimit2(sk, skb, TIPC_DUMP_ALL,
2380 "rcvq >90% allocated!");
Jon Maloy64ac5f52017-10-13 11:04:20 +02002381 sk->sk_data_ready(sk);
2382 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01002383}
2384
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09002385/**
Jon Maloy64ac5f52017-10-13 11:04:20 +02002386 * tipc_sk_backlog_rcv - handle incoming message from backlog queue
Allan Stephens0c3141e2008-04-15 00:22:02 -07002387 * @sk: socket
Ying Xuea6ca1092014-11-26 11:41:55 +08002388 * @skb: message
Allan Stephens0c3141e2008-04-15 00:22:02 -07002389 *
Jon Paul Maloye3a77562015-02-05 08:36:39 -05002390 * Caller must hold socket lock
Allan Stephens0c3141e2008-04-15 00:22:02 -07002391 */
Jon Maloy64ac5f52017-10-13 11:04:20 +02002392static int tipc_sk_backlog_rcv(struct sock *sk, struct sk_buff *skb)
Allan Stephens0c3141e2008-04-15 00:22:02 -07002393{
Jon Maloy64ac5f52017-10-13 11:04:20 +02002394 unsigned int before = sk_rmem_alloc_get(sk);
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04002395 struct sk_buff_head xmitq;
Jon Maloy64ac5f52017-10-13 11:04:20 +02002396 unsigned int added;
Allan Stephens0c3141e2008-04-15 00:22:02 -07002397
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04002398 __skb_queue_head_init(&xmitq);
2399
Jon Maloy64ac5f52017-10-13 11:04:20 +02002400 tipc_sk_filter_rcv(sk, skb, &xmitq);
2401 added = sk_rmem_alloc_get(sk) - before;
2402 atomic_add(added, &tipc_sk(sk)->dupl_rcvcnt);
2403
2404 /* Send pending response/rejected messages, if any */
Jon Maloyf70d37b2017-10-13 11:04:21 +02002405 tipc_node_distr_xmit(sock_net(sk), &xmitq);
Allan Stephens0c3141e2008-04-15 00:22:02 -07002406 return 0;
2407}
2408
2409/**
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002410 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
2411 * inputq and try adding them to socket or backlog queue
2412 * @inputq: list of incoming buffers with potentially different destinations
2413 * @sk: socket where the buffers should be enqueued
2414 * @dport: port number for the socket
Randy Dunlapf172f4b2020-11-29 10:32:49 -08002415 * @xmitq: output queue
Jon Paul Maloyd570d862015-02-05 08:36:38 -05002416 *
2417 * Caller must hold socket lock
Jon Paul Maloyd570d862015-02-05 08:36:38 -05002418 */
Jon Paul Maloycda36962015-07-22 10:11:20 -04002419static void tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04002420 u32 dport, struct sk_buff_head *xmitq)
Jon Paul Maloyd570d862015-02-05 08:36:38 -05002421{
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04002422 unsigned long time_limit = jiffies + 2;
2423 struct sk_buff *skb;
Jon Paul Maloyd570d862015-02-05 08:36:38 -05002424 unsigned int lim;
2425 atomic_t *dcnt;
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04002426 u32 onode;
Jon Paul Maloyd570d862015-02-05 08:36:38 -05002427
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002428 while (skb_queue_len(inputq)) {
Jon Paul Maloy51a00da2015-02-08 11:10:50 -05002429 if (unlikely(time_after_eq(jiffies, time_limit)))
Jon Paul Maloycda36962015-07-22 10:11:20 -04002430 return;
2431
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002432 skb = tipc_skb_dequeue(inputq, dport);
2433 if (unlikely(!skb))
Jon Paul Maloycda36962015-07-22 10:11:20 -04002434 return;
2435
2436 /* Add message directly to receive queue if possible */
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002437 if (!sock_owned_by_user(sk)) {
Jon Maloy64ac5f52017-10-13 11:04:20 +02002438 tipc_sk_filter_rcv(sk, skb, xmitq);
Jon Paul Maloycda36962015-07-22 10:11:20 -04002439 continue;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002440 }
Jon Paul Maloycda36962015-07-22 10:11:20 -04002441
2442 /* Try backlog, compensating for double-counted bytes */
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002443 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
Jon Paul Maloy7c8bcfb2016-05-02 11:58:45 -04002444 if (!sk->sk_backlog.len)
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002445 atomic_set(dcnt, 0);
2446 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
Tuong Lien01e661e2018-12-19 09:17:58 +07002447 if (likely(!sk_add_backlog(sk, skb, lim))) {
2448 trace_tipc_sk_overlimit1(sk, skb, TIPC_DUMP_ALL,
2449 "bklg & rcvq >90% allocated!");
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002450 continue;
Tuong Lien01e661e2018-12-19 09:17:58 +07002451 }
Jon Paul Maloycda36962015-07-22 10:11:20 -04002452
Tuong Lien01e661e2018-12-19 09:17:58 +07002453 trace_tipc_sk_dump(sk, skb, TIPC_DUMP_ALL, "err_overload!");
Jon Paul Maloycda36962015-07-22 10:11:20 -04002454 /* Overload => reject message back to sender */
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04002455 onode = tipc_own_addr(sock_net(sk));
GhantaKrishnamurthy MohanKrishna872619d2018-03-21 14:37:45 +01002456 atomic_inc(&sk->sk_drops);
Tuong Lien01e661e2018-12-19 09:17:58 +07002457 if (tipc_msg_reverse(onode, &skb, TIPC_ERR_OVERLOAD)) {
2458 trace_tipc_sk_rej_msg(sk, skb, TIPC_DUMP_ALL,
2459 "@sk_enqueue!");
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04002460 __skb_queue_tail(xmitq, skb);
Tuong Lien01e661e2018-12-19 09:17:58 +07002461 }
Jon Paul Maloycda36962015-07-22 10:11:20 -04002462 break;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002463 }
Jon Paul Maloyd570d862015-02-05 08:36:38 -05002464}
2465
2466/**
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002467 * tipc_sk_rcv - handle a chain of incoming buffers
Randy Dunlapf172f4b2020-11-29 10:32:49 -08002468 * @net: the associated network namespace
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002469 * @inputq: buffer list containing the buffers
2470 * Consumes all buffers in list until inputq is empty
2471 * Note: may be called in multiple threads referring to the same queue
Allan Stephens0c3141e2008-04-15 00:22:02 -07002472 */
Jon Paul Maloycda36962015-07-22 10:11:20 -04002473void tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
Allan Stephens0c3141e2008-04-15 00:22:02 -07002474{
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04002475 struct sk_buff_head xmitq;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002476 u32 dnode, dport = 0;
Erik Hugne9871b272015-04-23 09:37:39 -04002477 int err;
Jon Paul Maloy9816f062014-05-14 05:39:15 -04002478 struct tipc_sock *tsk;
Jon Paul Maloy9816f062014-05-14 05:39:15 -04002479 struct sock *sk;
Jon Paul Maloycda36962015-07-22 10:11:20 -04002480 struct sk_buff *skb;
Jon Paul Maloy9816f062014-05-14 05:39:15 -04002481
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04002482 __skb_queue_head_init(&xmitq);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002483 while (skb_queue_len(inputq)) {
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002484 dport = tipc_skb_peek_port(inputq, dport);
2485 tsk = tipc_sk_lookup(net, dport);
Jon Paul Maloycda36962015-07-22 10:11:20 -04002486
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002487 if (likely(tsk)) {
2488 sk = &tsk->sk;
2489 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04002490 tipc_sk_enqueue(inputq, sk, dport, &xmitq);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002491 spin_unlock_bh(&sk->sk_lock.slock);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002492 }
Jon Paul Maloyf1d048f2016-06-17 06:35:57 -04002493 /* Send pending response/rejected messages, if any */
Jon Maloyf70d37b2017-10-13 11:04:21 +02002494 tipc_node_distr_xmit(sock_net(sk), &xmitq);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002495 sock_put(sk);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002496 continue;
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002497 }
Jon Paul Maloycda36962015-07-22 10:11:20 -04002498 /* No destination socket => dequeue skb if still there */
2499 skb = tipc_skb_dequeue(inputq, dport);
2500 if (!skb)
2501 return;
2502
2503 /* Try secondary lookup if unresolved named message */
2504 err = TIPC_ERR_NO_PORT;
2505 if (tipc_msg_lookup_dest(net, skb, &err))
2506 goto xmit;
2507
2508 /* Prepare for message rejection */
2509 if (!tipc_msg_reverse(tipc_own_addr(net), &skb, err))
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002510 continue;
Tuong Lien01e661e2018-12-19 09:17:58 +07002511
2512 trace_tipc_sk_rej_msg(NULL, skb, TIPC_DUMP_NONE, "@sk_rcv!");
Jon Paul Maloye3a77562015-02-05 08:36:39 -05002513xmit:
Jon Paul Maloycda36962015-07-22 10:11:20 -04002514 dnode = msg_destnode(buf_msg(skb));
Jon Paul Maloyaf9b0282015-07-16 16:54:24 -04002515 tipc_node_xmit_skb(net, skb, dnode, dport);
Jon Paul Maloyc637c102015-02-05 08:36:41 -05002516 }
Allan Stephens0c3141e2008-04-15 00:22:02 -07002517}
2518
Ying Xue78eb3a52014-01-17 09:50:03 +08002519static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
2520{
WANG Congd9dc8b02016-11-11 10:20:50 -08002521 DEFINE_WAIT_FUNC(wait, woken_wake_function);
Ying Xue78eb3a52014-01-17 09:50:03 +08002522 struct sock *sk = sock->sk;
Ying Xue78eb3a52014-01-17 09:50:03 +08002523 int done;
2524
2525 do {
2526 int err = sock_error(sk);
2527 if (err)
2528 return err;
2529 if (!*timeo_p)
2530 return -ETIMEDOUT;
2531 if (signal_pending(current))
2532 return sock_intr_errno(*timeo_p);
Tuong Lien5391a872020-02-10 15:35:44 +07002533 if (sk->sk_state == TIPC_DISCONNECTING)
2534 break;
Ying Xue78eb3a52014-01-17 09:50:03 +08002535
WANG Congd9dc8b02016-11-11 10:20:50 -08002536 add_wait_queue(sk_sleep(sk), &wait);
Tuong Lien9546a0b2020-01-08 09:19:00 +07002537 done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
2538 &wait);
WANG Congd9dc8b02016-11-11 10:20:50 -08002539 remove_wait_queue(sk_sleep(sk), &wait);
Ying Xue78eb3a52014-01-17 09:50:03 +08002540 } while (!done);
2541 return 0;
2542}
2543
Erik Hugneea239312019-03-17 18:46:42 +01002544static bool tipc_sockaddr_is_sane(struct sockaddr_tipc *addr)
2545{
2546 if (addr->family != AF_TIPC)
2547 return false;
2548 if (addr->addrtype == TIPC_SERVICE_RANGE)
2549 return (addr->addr.nameseq.lower <= addr->addr.nameseq.upper);
2550 return (addr->addrtype == TIPC_SERVICE_ADDR ||
2551 addr->addrtype == TIPC_SOCKET_ADDR);
2552}
2553
Per Lidenb97bf3f2006-01-02 19:04:38 +01002554/**
Ying Xue247f0f32014-02-18 16:06:46 +08002555 * tipc_connect - establish a connection to another TIPC port
Per Lidenb97bf3f2006-01-02 19:04:38 +01002556 * @sock: socket structure
2557 * @dest: socket address for destination port
2558 * @destlen: size of socket address data structure
Allan Stephens0c3141e2008-04-15 00:22:02 -07002559 * @flags: file-related flags associated with socket
Per Lidenb97bf3f2006-01-02 19:04:38 +01002560 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08002561 * Return: 0 on success, errno otherwise
Per Lidenb97bf3f2006-01-02 19:04:38 +01002562 */
Ying Xue247f0f32014-02-18 16:06:46 +08002563static int tipc_connect(struct socket *sock, struct sockaddr *dest,
2564 int destlen, int flags)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002565{
Allan Stephens0c3141e2008-04-15 00:22:02 -07002566 struct sock *sk = sock->sk;
Erik Hugnef2f80362015-03-19 09:02:19 +01002567 struct tipc_sock *tsk = tipc_sk(sk);
Allan Stephensb89741a2008-04-15 00:20:37 -07002568 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
2569 struct msghdr m = {NULL,};
Erik Hugnef2f80362015-03-19 09:02:19 +01002570 long timeout = (flags & O_NONBLOCK) ? 0 : tsk->conn_timeout;
Parthasarathy Bhuvaragan99a20882016-11-01 14:02:48 +01002571 int previous;
Erik Hugnef2f80362015-03-19 09:02:19 +01002572 int res = 0;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002573
Jon Maloy23998832017-10-13 11:04:18 +02002574 if (destlen != sizeof(struct sockaddr_tipc))
2575 return -EINVAL;
2576
Allan Stephens0c3141e2008-04-15 00:22:02 -07002577 lock_sock(sk);
2578
Jon Maloy75da2162017-10-13 11:04:23 +02002579 if (tsk->group) {
2580 res = -EINVAL;
2581 goto exit;
2582 }
2583
Jon Maloy23998832017-10-13 11:04:18 +02002584 if (dst->family == AF_UNSPEC) {
2585 memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
2586 if (!tipc_sk_type_connectionless(sk))
2587 res = -EINVAL;
2588 goto exit;
Jon Maloy23998832017-10-13 11:04:18 +02002589 }
Erik Hugneea239312019-03-17 18:46:42 +01002590 if (!tipc_sockaddr_is_sane(dst)) {
Jon Maloy23998832017-10-13 11:04:18 +02002591 res = -EINVAL;
Jon Maloy23998832017-10-13 11:04:18 +02002592 goto exit;
Erik Hugneea239312019-03-17 18:46:42 +01002593 }
Erik Hugnef2f80362015-03-19 09:02:19 +01002594 /* DGRAM/RDM connect(), just save the destaddr */
Parthasarathy Bhuvaraganc752023a2016-11-01 14:02:42 +01002595 if (tipc_sk_type_connectionless(sk)) {
Jon Maloy23998832017-10-13 11:04:18 +02002596 memcpy(&tsk->peer, dest, destlen);
Allan Stephens0c3141e2008-04-15 00:22:02 -07002597 goto exit;
Erik Hugneea239312019-03-17 18:46:42 +01002598 } else if (dst->addrtype == TIPC_SERVICE_RANGE) {
2599 res = -EINVAL;
2600 goto exit;
Allan Stephens0c3141e2008-04-15 00:22:02 -07002601 }
2602
Parthasarathy Bhuvaragan99a20882016-11-01 14:02:48 +01002603 previous = sk->sk_state;
Parthasarathy Bhuvaragan438adca2016-11-01 14:02:45 +01002604
2605 switch (sk->sk_state) {
2606 case TIPC_OPEN:
Ying Xue584d24b2012-11-29 18:51:19 -05002607 /* Send a 'SYN-' to destination */
2608 m.msg_name = dest;
2609 m.msg_namelen = destlen;
2610
2611		/* If connect is non-blocking, set MSG_DONTWAIT so that
2612		 * __tipc_sendmsg() never blocks.
2613 */
2614 if (!timeout)
2615 m.msg_flags = MSG_DONTWAIT;
2616
Ying Xue39a0295f2015-03-02 15:37:47 +08002617 res = __tipc_sendmsg(sock, &m, 0);
Ying Xue584d24b2012-11-29 18:51:19 -05002618 if ((res < 0) && (res != -EWOULDBLOCK))
2619 goto exit;
2620
Parthasarathy Bhuvaragan99a20882016-11-01 14:02:48 +01002621 /* Just entered TIPC_CONNECTING state; the only
Ying Xue584d24b2012-11-29 18:51:19 -05002622 * difference is that return value in non-blocking
2623 * case is EINPROGRESS, rather than EALREADY.
2624 */
2625 res = -EINPROGRESS;
Miaohe Lin7f8901b2020-08-18 08:07:13 -04002626 fallthrough;
Parthasarathy Bhuvaragan99a20882016-11-01 14:02:48 +01002627 case TIPC_CONNECTING:
2628 if (!timeout) {
2629 if (previous == TIPC_CONNECTING)
2630 res = -EALREADY;
Ying Xue78eb3a52014-01-17 09:50:03 +08002631 goto exit;
Parthasarathy Bhuvaragan99a20882016-11-01 14:02:48 +01002632 }
Ying Xue78eb3a52014-01-17 09:50:03 +08002633 timeout = msecs_to_jiffies(timeout);
2634 /* Wait until an 'ACK' or 'RST' arrives, or a timeout occurs */
2635 res = tipc_wait_for_connect(sock, &timeout);
Parthasarathy Bhuvaraganf40acba2016-11-01 14:02:49 +01002636 break;
2637 case TIPC_ESTABLISHED:
Parthasarathy Bhuvaragan99a20882016-11-01 14:02:48 +01002638 res = -EISCONN;
Parthasarathy Bhuvaraganf40acba2016-11-01 14:02:49 +01002639 break;
2640 default:
Parthasarathy Bhuvaragan99a20882016-11-01 14:02:48 +01002641 res = -EINVAL;
Parthasarathy Bhuvaraganf40acba2016-11-01 14:02:49 +01002642 }
Parthasarathy Bhuvaragan99a20882016-11-01 14:02:48 +01002643
Allan Stephens0c3141e2008-04-15 00:22:02 -07002644exit:
2645 release_sock(sk);
Allan Stephensb89741a2008-04-15 00:20:37 -07002646 return res;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002647}
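As a hedged userspace illustration of the connect() path implemented above: open a SOCK_STREAM TIPC socket and connect it to a service address. The service type/instance values (18888/17) are invented example numbers, and TIPC_SERVICE_ADDR is the newer name for TIPC_ADDR_NAME. A TIPC_SERVICE_RANGE destination would be rejected with EINVAL here, and on SOCK_RDM/SOCK_DGRAM sockets the call would merely record the destination.

#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_connect(void)
{
	struct sockaddr_tipc srv;
	int sd = socket(AF_TIPC, SOCK_STREAM, 0);

	if (sd < 0)
		return -1;
	memset(&srv, 0, sizeof(srv));
	srv.family = AF_TIPC;
	srv.addrtype = TIPC_SERVICE_ADDR;	/* connect by service name */
	srv.addr.name.name.type = 18888;	/* example service type */
	srv.addr.name.name.instance = 17;	/* example instance */
	return connect(sd, (struct sockaddr *)&srv, sizeof(srv));
}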
2648
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09002649/**
Ying Xue247f0f32014-02-18 16:06:46 +08002650 * tipc_listen - allow socket to listen for incoming connections
Per Lidenb97bf3f2006-01-02 19:04:38 +01002651 * @sock: socket structure
2652 * @len: (unused)
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09002653 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08002654 * Return: 0 on success, errno otherwise
Per Lidenb97bf3f2006-01-02 19:04:38 +01002655 */
Ying Xue247f0f32014-02-18 16:06:46 +08002656static int tipc_listen(struct socket *sock, int len)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002657{
Allan Stephens0c3141e2008-04-15 00:22:02 -07002658 struct sock *sk = sock->sk;
2659 int res;
2660
2661 lock_sock(sk);
Parthasarathy Bhuvaragan0c288c82016-11-01 14:02:43 +01002662 res = tipc_set_sk_state(sk, TIPC_LISTEN);
Allan Stephens0c3141e2008-04-15 00:22:02 -07002663 release_sock(sk);
Parthasarathy Bhuvaragan0c288c82016-11-01 14:02:43 +01002664
Allan Stephens0c3141e2008-04-15 00:22:02 -07002665 return res;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002666}
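A sketch of the server side this call enables, assuming an invented service type 18888: bind the socket to a service range, then move it to TIPC_LISTEN state. The backlog argument is ignored by TIPC.

#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_listen(void)
{
	struct sockaddr_tipc srv;
	int sd = socket(AF_TIPC, SOCK_STREAM, 0);

	if (sd < 0)
		return -1;
	memset(&srv, 0, sizeof(srv));
	srv.family = AF_TIPC;
	srv.addrtype = TIPC_SERVICE_RANGE;
	srv.scope = TIPC_CLUSTER_SCOPE;
	srv.addr.nameseq.type = 18888;		/* example service type */
	srv.addr.nameseq.lower = 0;
	srv.addr.nameseq.upper = 100;
	if (bind(sd, (struct sockaddr *)&srv, sizeof(srv)) < 0)
		return -1;
	return listen(sd, 0);
}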
2667
Ying Xue6398e232014-01-17 09:50:04 +08002668static int tipc_wait_for_accept(struct socket *sock, long timeo)
2669{
2670 struct sock *sk = sock->sk;
2671 DEFINE_WAIT(wait);
2672 int err;
2673
2674 /* True wake-one mechanism for incoming connections: only
2675 * one process gets woken up, not the 'whole herd'.
2676 * Since we do not 'race & poll' for established sockets
2677 * anymore, the common case will execute the loop only once.
2678 */
2679 for (;;) {
2680 prepare_to_wait_exclusive(sk_sleep(sk), &wait,
2681 TASK_INTERRUPTIBLE);
Ying Xuefe8e4642014-03-06 14:40:18 +01002682 if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
Ying Xue6398e232014-01-17 09:50:04 +08002683 release_sock(sk);
2684 timeo = schedule_timeout(timeo);
2685 lock_sock(sk);
2686 }
2687 err = 0;
2688 if (!skb_queue_empty(&sk->sk_receive_queue))
2689 break;
Ying Xue6398e232014-01-17 09:50:04 +08002690 err = -EAGAIN;
2691 if (!timeo)
2692 break;
Erik Hugne143fe222015-03-09 10:43:42 +01002693 err = sock_intr_errno(timeo);
2694 if (signal_pending(current))
2695 break;
Ying Xue6398e232014-01-17 09:50:04 +08002696 }
2697 finish_wait(sk_sleep(sk), &wait);
2698 return err;
2699}
2700
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09002701/**
Ying Xue247f0f32014-02-18 16:06:46 +08002702 * tipc_accept - wait for connection request
Per Lidenb97bf3f2006-01-02 19:04:38 +01002703 * @sock: listening socket
Andrew Lunnd8141202020-07-13 01:15:14 +02002704 * @new_sock: new socket that is to be connected
Per Lidenb97bf3f2006-01-02 19:04:38 +01002705 * @flags: file-related flags associated with socket
Randy Dunlapf172f4b2020-11-29 10:32:49 -08002706 * @kern: caused by kernel or by userspace?
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09002707 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08002708 * Return: 0 on success, errno otherwise
Per Lidenb97bf3f2006-01-02 19:04:38 +01002709 */
David Howellscdfbabf2017-03-09 08:09:05 +00002710static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags,
2711 bool kern)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002712{
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002713 struct sock *new_sk, *sk = sock->sk;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002714 struct sk_buff *buf;
Jon Paul Maloy301bae52014-08-22 18:09:20 -04002715 struct tipc_sock *new_tsock;
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002716 struct tipc_msg *msg;
Ying Xue6398e232014-01-17 09:50:04 +08002717 long timeo;
Allan Stephens0c3141e2008-04-15 00:22:02 -07002718 int res;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002719
Allan Stephens0c3141e2008-04-15 00:22:02 -07002720 lock_sock(sk);
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09002721
Parthasarathy Bhuvaragan0c288c82016-11-01 14:02:43 +01002722 if (sk->sk_state != TIPC_LISTEN) {
Allan Stephens0c3141e2008-04-15 00:22:02 -07002723 res = -EINVAL;
2724 goto exit;
2725 }
Ying Xue6398e232014-01-17 09:50:04 +08002726 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
2727 res = tipc_wait_for_accept(sock, timeo);
2728 if (res)
2729 goto exit;
Allan Stephens0c3141e2008-04-15 00:22:02 -07002730
2731 buf = skb_peek(&sk->sk_receive_queue);
2732
David Howellscdfbabf2017-03-09 08:09:05 +00002733 res = tipc_sk_create(sock_net(sock->sk), new_sock, 0, kern);
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002734 if (res)
2735 goto exit;
Stephen Smalleyfdd75ea2015-07-07 09:43:45 -04002736 security_sk_clone(sock->sk, new_sock->sk);
Allan Stephens0c3141e2008-04-15 00:22:02 -07002737
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002738 new_sk = new_sock->sk;
Jon Paul Maloy301bae52014-08-22 18:09:20 -04002739 new_tsock = tipc_sk(new_sk);
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002740 msg = buf_msg(buf);
Allan Stephens0c3141e2008-04-15 00:22:02 -07002741
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002742 /* we lock on new_sk; but lockdep sees the lock on sk */
2743 lock_sock_nested(new_sk, SINGLE_DEPTH_NESTING);
Allan Stephens0c3141e2008-04-15 00:22:02 -07002744
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002745 /*
2746 * Reject any stray messages received by new socket
2747 * before the socket lock was taken (very, very unlikely)
2748 */
Tuong Lien49afb802020-01-08 09:18:15 +07002749 tsk_rej_rx_queue(new_sk, TIPC_ERR_NO_PORT);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002750
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002751	/* Connect new socket to its peer */
Jon Paul Maloy301bae52014-08-22 18:09:20 -04002752 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
Per Lidenb97bf3f2006-01-02 19:04:38 +01002753
Christoph Hellwig095ae612020-05-28 07:12:36 +02002754 tsk_set_importance(new_sk, msg_importance(msg));
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002755 if (msg_named(msg)) {
Jon Paul Maloy301bae52014-08-22 18:09:20 -04002756 new_tsock->conn_type = msg_nametype(msg);
2757 new_tsock->conn_instance = msg_nameinst(msg);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002758 }
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002759
2760 /*
2761	 * Respond to 'SYN-' by discarding it & returning 'ACK-'.
2762 * Respond to 'SYN+' by queuing it on new socket.
2763 */
2764 if (!msg_data_sz(msg)) {
2765 struct msghdr m = {NULL,};
2766
Jon Paul Maloy2e84c602014-08-22 18:09:18 -04002767 tsk_advance_rx_queue(sk);
Jon Paul Maloy365ad352017-01-03 10:55:11 -05002768 __tipc_sendstream(new_sock, &m, 0);
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002769 } else {
2770 __skb_dequeue(&sk->sk_receive_queue);
2771 __skb_queue_head(&new_sk->sk_receive_queue, buf);
Ying Xueaba79f32013-01-20 23:30:09 +01002772 skb_set_owner_r(buf, new_sk);
Paul Gortmaker0fef8f22012-12-04 11:01:55 -05002773 }
2774 release_sock(new_sk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002775exit:
Allan Stephens0c3141e2008-04-15 00:22:02 -07002776 release_sock(sk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002777 return res;
2778}
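Continuing the listening-socket sketch after tipc_listen() above, an accept loop might look like the following; error handling is trimmed and the buffer size is arbitrary.

#include <sys/socket.h>
#include <unistd.h>

static void example_accept_loop(int listener)
{
	char buf[1024];

	for (;;) {
		int peer = accept(listener, NULL, NULL);

		if (peer < 0)
			break;
		/* Data carried by a SYN+ may already be queued on 'peer' */
		while (read(peer, buf, sizeof(buf)) > 0)
			;
		close(peer);
	}
}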
2779
2780/**
Ying Xue247f0f32014-02-18 16:06:46 +08002781 * tipc_shutdown - shutdown socket connection
Per Lidenb97bf3f2006-01-02 19:04:38 +01002782 * @sock: socket structure
Allan Stephense247a8f2008-03-06 15:05:38 -08002783 * @how: direction to close (must be SHUT_RDWR)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002784 *
2785 * Terminates connection (if necessary), then purges socket's receive queue.
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09002786 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08002787 * Return: 0 on success, errno otherwise
Per Lidenb97bf3f2006-01-02 19:04:38 +01002788 */
Ying Xue247f0f32014-02-18 16:06:46 +08002789static int tipc_shutdown(struct socket *sock, int how)
Per Lidenb97bf3f2006-01-02 19:04:38 +01002790{
Allan Stephens0c3141e2008-04-15 00:22:02 -07002791 struct sock *sk = sock->sk;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002792 int res;
2793
Allan Stephense247a8f2008-03-06 15:05:38 -08002794 if (how != SHUT_RDWR)
2795 return -EINVAL;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002796
Allan Stephens0c3141e2008-04-15 00:22:02 -07002797 lock_sock(sk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002798
Tuong Lien01e661e2018-12-19 09:17:58 +07002799 trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " ");
Parthasarathy Bhuvaragan6f000892016-11-01 14:02:47 +01002800 __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN);
Tetsuo Handaa4b5cc92020-09-05 15:14:47 +09002801 sk->sk_shutdown = SHUTDOWN_MASK;
Per Lidenb97bf3f2006-01-02 19:04:38 +01002802
Parthasarathy Bhuvaragan6f000892016-11-01 14:02:47 +01002803 if (sk->sk_state == TIPC_DISCONNECTING) {
Ying Xue75031152012-10-29 09:38:15 -04002804 /* Discard any unreceived messages */
Ying Xue57467e52013-01-20 23:30:08 +01002805 __skb_queue_purge(&sk->sk_receive_queue);
Ying Xue75031152012-10-29 09:38:15 -04002806
Per Lidenb97bf3f2006-01-02 19:04:38 +01002807 res = 0;
Parthasarathy Bhuvaragan6f000892016-11-01 14:02:47 +01002808 } else {
Per Lidenb97bf3f2006-01-02 19:04:38 +01002809 res = -ENOTCONN;
2810 }
Tetsuo Handa2a638662020-09-02 22:44:16 +09002811 /* Wake up anyone sleeping in poll. */
2812 sk->sk_state_change(sk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002813
Allan Stephens0c3141e2008-04-15 00:22:02 -07002814 release_sock(sk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01002815 return res;
2816}
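Only SHUT_RDWR is accepted, as enforced above; a minimal userspace sketch:

#include <sys/socket.h>
#include <unistd.h>

static int example_shutdown(int sd)
{
	/* SHUT_RD or SHUT_WR alone is rejected with EINVAL by TIPC */
	int rc = shutdown(sd, SHUT_RDWR);

	close(sd);
	return rc;	/* fails with ENOTCONN if no connection existed */
}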
2817
Jon Maloyafe87922018-09-28 20:23:19 +02002818static void tipc_sk_check_probing_state(struct sock *sk,
2819 struct sk_buff_head *list)
2820{
2821 struct tipc_sock *tsk = tipc_sk(sk);
2822 u32 pnode = tsk_peer_node(tsk);
2823 u32 pport = tsk_peer_port(tsk);
2824 u32 self = tsk_own_node(tsk);
2825 u32 oport = tsk->portid;
2826 struct sk_buff *skb;
2827
2828 if (tsk->probe_unacked) {
2829 tipc_set_sk_state(sk, TIPC_DISCONNECTING);
2830 sk->sk_err = ECONNABORTED;
2831 tipc_node_remove_conn(sock_net(sk), pnode, pport);
2832 sk->sk_state_change(sk);
2833 return;
2834 }
2835 /* Prepare new probe */
2836 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 0,
2837 pnode, self, pport, oport, TIPC_OK);
2838 if (skb)
2839 __skb_queue_tail(list, skb);
2840 tsk->probe_unacked = true;
2841 sk_reset_timer(sk, &sk->sk_timer, jiffies + CONN_PROBING_INTV);
2842}
2843
Tung Nguyen67879272018-09-28 20:23:22 +02002844static void tipc_sk_retry_connect(struct sock *sk, struct sk_buff_head *list)
2845{
2846 struct tipc_sock *tsk = tipc_sk(sk);
2847
2848 /* Try again later if dest link is congested */
2849 if (tsk->cong_link_cnt) {
2850 sk_reset_timer(sk, &sk->sk_timer, msecs_to_jiffies(100));
2851 return;
2852 }
2853 /* Prepare SYN for retransmit */
2854 tipc_msg_skb_clone(&sk->sk_write_queue, list);
2855}
2856
Kees Cook31b102b2017-10-30 14:06:45 -07002857static void tipc_sk_timeout(struct timer_list *t)
Jon Paul Maloy57289012014-08-22 18:09:09 -04002858{
Kees Cook31b102b2017-10-30 14:06:45 -07002859 struct sock *sk = from_timer(sk, t, sk_timer);
2860 struct tipc_sock *tsk = tipc_sk(sk);
Jon Maloyafe87922018-09-28 20:23:19 +02002861 u32 pnode = tsk_peer_node(tsk);
2862 struct sk_buff_head list;
Tung Nguyen67879272018-09-28 20:23:22 +02002863 int rc = 0;
Jon Paul Maloy57289012014-08-22 18:09:09 -04002864
Jon Maloye654f9f2019-08-15 16:42:50 +02002865 __skb_queue_head_init(&list);
Jon Paul Maloy57289012014-08-22 18:09:09 -04002866 bh_lock_sock(sk);
Jon Maloy0d5fcebf2017-10-20 11:21:32 +02002867
2868 /* Try again later if socket is busy */
2869 if (sock_owned_by_user(sk)) {
2870 sk_reset_timer(sk, &sk->sk_timer, jiffies + HZ / 20);
Jon Maloyafe87922018-09-28 20:23:19 +02002871 bh_unlock_sock(sk);
Tung Nguyen91a4a3e2019-11-28 10:10:06 +07002872 sock_put(sk);
Jon Maloyafe87922018-09-28 20:23:19 +02002873 return;
Jon Paul Maloy6c9808c2014-08-22 18:09:16 -04002874 }
Jon Paul Maloy57289012014-08-22 18:09:09 -04002875
Jon Maloyafe87922018-09-28 20:23:19 +02002876 if (sk->sk_state == TIPC_ESTABLISHED)
2877 tipc_sk_check_probing_state(sk, &list);
Tung Nguyen67879272018-09-28 20:23:22 +02002878 else if (sk->sk_state == TIPC_CONNECTING)
2879 tipc_sk_retry_connect(sk, &list);
Jon Maloyafe87922018-09-28 20:23:19 +02002880
Jon Paul Maloy57289012014-08-22 18:09:09 -04002881 bh_unlock_sock(sk);
Jon Maloyafe87922018-09-28 20:23:19 +02002882
2883 if (!skb_queue_empty(&list))
Tung Nguyen67879272018-09-28 20:23:22 +02002884 rc = tipc_node_xmit(sock_net(sk), &list, pnode, tsk->portid);
Jon Maloyafe87922018-09-28 20:23:19 +02002885
Tung Nguyen67879272018-09-28 20:23:22 +02002886 /* SYN messages may cause link congestion */
2887 if (rc == -ELINKCONG) {
2888 tipc_dest_push(&tsk->cong_links, pnode, 0);
2889 tsk->cong_link_cnt = 1;
2890 }
Ying Xue07f6c4b2015-01-07 13:41:58 +08002891 sock_put(sk);
Jon Paul Maloy57289012014-08-22 18:09:09 -04002892}
2893
Jon Paul Maloy301bae52014-08-22 18:09:20 -04002894static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
Jon Maloyb6f88d92020-11-25 13:29:15 -05002895 struct tipc_service_range const *seq)
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002896{
Parthasarathy Bhuvaragand6fb7e92016-11-01 14:02:40 +01002897 struct sock *sk = &tsk->sk;
2898 struct net *net = sock_net(sk);
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002899 struct publication *publ;
2900 u32 key;
2901
Jon Maloy928df182018-03-15 16:48:51 +01002902 if (scope != TIPC_NODE_SCOPE)
2903 scope = TIPC_CLUSTER_SCOPE;
2904
Parthasarathy Bhuvaragand6fb7e92016-11-01 14:02:40 +01002905 if (tipc_sk_connected(sk))
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002906 return -EINVAL;
Ying Xue07f6c4b2015-01-07 13:41:58 +08002907 key = tsk->portid + tsk->pub_count + 1;
2908 if (key == tsk->portid)
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002909 return -EADDRINUSE;
2910
Ying Xuef2f98002015-01-09 15:27:05 +08002911 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
Ying Xue07f6c4b2015-01-07 13:41:58 +08002912 scope, tsk->portid, key);
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002913 if (unlikely(!publ))
2914 return -EINVAL;
2915
Jon Maloye50e73e2018-03-15 16:48:55 +01002916 list_add(&publ->binding_sock, &tsk->publications);
Jon Paul Maloy301bae52014-08-22 18:09:20 -04002917 tsk->pub_count++;
2918 tsk->published = 1;
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002919 return 0;
2920}
2921
Jon Paul Maloy301bae52014-08-22 18:09:20 -04002922static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
Jon Maloyb6f88d92020-11-25 13:29:15 -05002923 struct tipc_service_range const *seq)
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002924{
Ying Xuef2f98002015-01-09 15:27:05 +08002925 struct net *net = sock_net(&tsk->sk);
Jon Maloy998d3902021-03-16 22:06:08 -04002926 struct publication *p;
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002927 struct publication *safe;
2928 int rc = -EINVAL;
2929
Jon Maloy928df182018-03-15 16:48:51 +01002930 if (scope != TIPC_NODE_SCOPE)
2931 scope = TIPC_CLUSTER_SCOPE;
2932
Jon Maloy998d3902021-03-16 22:06:08 -04002933 list_for_each_entry_safe(p, safe, &tsk->publications, binding_sock) {
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002934 if (seq) {
Jon Maloy998d3902021-03-16 22:06:08 -04002935 if (p->scope != scope)
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002936 continue;
Jon Maloy998d3902021-03-16 22:06:08 -04002937 if (p->sr.type != seq->type)
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002938 continue;
Jon Maloy998d3902021-03-16 22:06:08 -04002939 if (p->sr.lower != seq->lower)
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002940 continue;
Jon Maloy998d3902021-03-16 22:06:08 -04002941 if (p->sr.upper != seq->upper)
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002942 break;
Jon Maloy998d3902021-03-16 22:06:08 -04002943 tipc_nametbl_withdraw(net, p->sr.type, p->sr.lower,
2944 p->sr.upper, p->key);
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002945 rc = 0;
2946 break;
2947 }
Jon Maloy998d3902021-03-16 22:06:08 -04002948 tipc_nametbl_withdraw(net, p->sr.type, p->sr.lower,
2949 p->sr.upper, p->key);
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002950 rc = 0;
2951 }
Jon Paul Maloy301bae52014-08-22 18:09:20 -04002952 if (list_empty(&tsk->publications))
2953 tsk->published = 0;
Jon Paul Maloy0fc87aa2014-08-22 18:09:17 -04002954 return rc;
2955}
2956
Jon Paul Maloy5a9ee0be2014-08-22 18:09:14 -04002957/* tipc_sk_reinit: set non-zero address in all existing sockets
2958 * when we go from standalone to network mode.
2959 */
Ying Xuee05b31f2015-01-09 15:27:08 +08002960void tipc_sk_reinit(struct net *net)
Jon Paul Maloy5a9ee0be2014-08-22 18:09:14 -04002961{
Ying Xuee05b31f2015-01-09 15:27:08 +08002962 struct tipc_net *tn = net_generic(net, tipc_net_id);
Herbert Xu40f9f432017-02-11 19:26:46 +08002963 struct rhashtable_iter iter;
Ying Xue07f6c4b2015-01-07 13:41:58 +08002964 struct tipc_sock *tsk;
Jon Paul Maloy5a9ee0be2014-08-22 18:09:14 -04002965 struct tipc_msg *msg;
Jon Paul Maloy5a9ee0be2014-08-22 18:09:14 -04002966
Herbert Xu40f9f432017-02-11 19:26:46 +08002967 rhashtable_walk_enter(&tn->sk_rht, &iter);
2968
2969 do {
Tom Herbert97a6ec42017-12-04 10:31:41 -08002970 rhashtable_walk_start(&iter);
Herbert Xu40f9f432017-02-11 19:26:46 +08002971
2972 while ((tsk = rhashtable_walk_next(&iter)) && !IS_ERR(tsk)) {
Cong Wang15ef70e2018-12-10 11:49:55 -08002973 sock_hold(&tsk->sk);
2974 rhashtable_walk_stop(&iter);
2975 lock_sock(&tsk->sk);
Ying Xue07f6c4b2015-01-07 13:41:58 +08002976 msg = &tsk->phdr;
Jon Maloy23fd3ea2018-03-22 20:42:49 +01002977 msg_set_prevnode(msg, tipc_own_addr(net));
2978 msg_set_orignode(msg, tipc_own_addr(net));
Cong Wang15ef70e2018-12-10 11:49:55 -08002979 release_sock(&tsk->sk);
2980 rhashtable_walk_start(&iter);
2981 sock_put(&tsk->sk);
Ying Xue07f6c4b2015-01-07 13:41:58 +08002982 }
Tom Herbert97a6ec42017-12-04 10:31:41 -08002983
Herbert Xu40f9f432017-02-11 19:26:46 +08002984 rhashtable_walk_stop(&iter);
2985 } while (tsk == ERR_PTR(-EAGAIN));
Cong Wangbd583fe2018-08-23 16:19:44 -07002986
2987 rhashtable_walk_exit(&iter);
Ying Xue07f6c4b2015-01-07 13:41:58 +08002988}
2989
Ying Xuee05b31f2015-01-09 15:27:08 +08002990static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
Ying Xue07f6c4b2015-01-07 13:41:58 +08002991{
Ying Xuee05b31f2015-01-09 15:27:08 +08002992 struct tipc_net *tn = net_generic(net, tipc_net_id);
Ying Xue07f6c4b2015-01-07 13:41:58 +08002993 struct tipc_sock *tsk;
2994
2995 rcu_read_lock();
Taehee Yooab818362019-11-22 08:15:19 +00002996 tsk = rhashtable_lookup(&tn->sk_rht, &portid, tsk_rht_params);
Ying Xue07f6c4b2015-01-07 13:41:58 +08002997 if (tsk)
2998 sock_hold(&tsk->sk);
2999 rcu_read_unlock();
3000
3001 return tsk;
3002}
3003
3004static int tipc_sk_insert(struct tipc_sock *tsk)
3005{
Ying Xuee05b31f2015-01-09 15:27:08 +08003006 struct sock *sk = &tsk->sk;
3007 struct net *net = sock_net(sk);
3008 struct tipc_net *tn = net_generic(net, tipc_net_id);
Ying Xue07f6c4b2015-01-07 13:41:58 +08003009 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
3010 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
3011
3012 while (remaining--) {
3013 portid++;
3014 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
3015 portid = TIPC_MIN_PORT;
3016 tsk->portid = portid;
3017 sock_hold(&tsk->sk);
Herbert Xu6cca72892015-03-20 21:57:05 +11003018 if (!rhashtable_lookup_insert_fast(&tn->sk_rht, &tsk->node,
3019 tsk_rht_params))
Ying Xue07f6c4b2015-01-07 13:41:58 +08003020 return 0;
3021 sock_put(&tsk->sk);
3022 }
3023
3024 return -1;
3025}
3026
3027static void tipc_sk_remove(struct tipc_sock *tsk)
3028{
3029 struct sock *sk = &tsk->sk;
Ying Xuee05b31f2015-01-09 15:27:08 +08003030 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
Ying Xue07f6c4b2015-01-07 13:41:58 +08003031
Herbert Xu6cca72892015-03-20 21:57:05 +11003032 if (!rhashtable_remove_fast(&tn->sk_rht, &tsk->node, tsk_rht_params)) {
Reshetova, Elena41c6d652017-06-30 13:08:01 +03003033 WARN_ON(refcount_read(&sk->sk_refcnt) == 1);
Ying Xue07f6c4b2015-01-07 13:41:58 +08003034 __sock_put(sk);
Jon Paul Maloy5a9ee0be2014-08-22 18:09:14 -04003035 }
3036}
3037
Herbert Xu6cca72892015-03-20 21:57:05 +11003038static const struct rhashtable_params tsk_rht_params = {
3039 .nelem_hint = 192,
3040 .head_offset = offsetof(struct tipc_sock, node),
3041 .key_offset = offsetof(struct tipc_sock, portid),
3042 .key_len = sizeof(u32), /* portid */
Herbert Xu6cca72892015-03-20 21:57:05 +11003043 .max_size = 1048576,
3044 .min_size = 256,
Thomas Grafb5e2c152015-03-24 20:42:19 +00003045 .automatic_shrinking = true,
Herbert Xu6cca72892015-03-20 21:57:05 +11003046};
3047
Ying Xuee05b31f2015-01-09 15:27:08 +08003048int tipc_sk_rht_init(struct net *net)
Jon Paul Maloy808d90f2014-08-22 18:09:19 -04003049{
Ying Xuee05b31f2015-01-09 15:27:08 +08003050 struct tipc_net *tn = net_generic(net, tipc_net_id);
Jon Paul Maloy808d90f2014-08-22 18:09:19 -04003051
Herbert Xu6cca72892015-03-20 21:57:05 +11003052 return rhashtable_init(&tn->sk_rht, &tsk_rht_params);
Jon Paul Maloy808d90f2014-08-22 18:09:19 -04003053}
3054
Ying Xuee05b31f2015-01-09 15:27:08 +08003055void tipc_sk_rht_destroy(struct net *net)
Jon Paul Maloy808d90f2014-08-22 18:09:19 -04003056{
Ying Xuee05b31f2015-01-09 15:27:08 +08003057 struct tipc_net *tn = net_generic(net, tipc_net_id);
3058
Ying Xue07f6c4b2015-01-07 13:41:58 +08003059 /* Wait for socket readers to complete */
3060 synchronize_net();
Jon Paul Maloy808d90f2014-08-22 18:09:19 -04003061
Ying Xuee05b31f2015-01-09 15:27:08 +08003062 rhashtable_destroy(&tn->sk_rht);
Jon Paul Maloy808d90f2014-08-22 18:09:19 -04003063}
3064
Jon Maloy75da2162017-10-13 11:04:23 +02003065static int tipc_sk_join(struct tipc_sock *tsk, struct tipc_group_req *mreq)
3066{
3067 struct net *net = sock_net(&tsk->sk);
Jon Maloy75da2162017-10-13 11:04:23 +02003068 struct tipc_group *grp = tsk->group;
3069 struct tipc_msg *hdr = &tsk->phdr;
Jon Maloyb6f88d92020-11-25 13:29:15 -05003070 struct tipc_service_range seq;
Jon Maloy75da2162017-10-13 11:04:23 +02003071 int rc;
3072
3073 if (mreq->type < TIPC_RESERVED_TYPES)
3074 return -EACCES;
Jon Maloy232d07b2018-01-08 21:03:30 +01003075 if (mreq->scope > TIPC_NODE_SCOPE)
3076 return -EINVAL;
Jon Maloy75da2162017-10-13 11:04:23 +02003077 if (grp)
3078 return -EACCES;
Jon Maloy60c25302018-01-17 16:42:46 +01003079 grp = tipc_group_create(net, tsk->portid, mreq, &tsk->group_is_open);
Jon Maloy75da2162017-10-13 11:04:23 +02003080 if (!grp)
3081 return -ENOMEM;
3082 tsk->group = grp;
3083 msg_set_lookup_scope(hdr, mreq->scope);
3084 msg_set_nametype(hdr, mreq->type);
3085 msg_set_dest_droppable(hdr, true);
3086 seq.type = mreq->type;
3087 seq.lower = mreq->instance;
3088 seq.upper = seq.lower;
Jon Maloy232d07b2018-01-08 21:03:30 +01003089 tipc_nametbl_build_group(net, grp, mreq->type, mreq->scope);
Jon Maloy75da2162017-10-13 11:04:23 +02003090 rc = tipc_sk_publish(tsk, mreq->scope, &seq);
Cong Wange233df02017-10-24 15:44:49 -07003091 if (rc) {
Jon Maloy75da2162017-10-13 11:04:23 +02003092 tipc_group_delete(net, grp);
Cong Wange233df02017-10-24 15:44:49 -07003093 tsk->group = NULL;
Jon Maloyfebafc82018-01-10 21:08:50 +01003094 return rc;
Cong Wange233df02017-10-24 15:44:49 -07003095 }
Jon Maloyd12d2e12018-01-08 21:03:28 +01003096 /* Eliminate any risk that a broadcast overtakes sent JOINs */
Jon Maloy399574d2017-10-13 11:04:32 +02003097 tsk->mc_method.rcast = true;
3098 tsk->mc_method.mandatory = true;
Jon Maloyd12d2e12018-01-08 21:03:28 +01003099 tipc_group_join(net, grp, &tsk->sk.sk_rcvbuf);
Jon Maloy75da2162017-10-13 11:04:23 +02003100 return rc;
3101}
3102
3103static int tipc_sk_leave(struct tipc_sock *tsk)
3104{
3105 struct net *net = sock_net(&tsk->sk);
3106 struct tipc_group *grp = tsk->group;
Jon Maloyb6f88d92020-11-25 13:29:15 -05003107 struct tipc_service_range seq;
Jon Maloy75da2162017-10-13 11:04:23 +02003108 int scope;
3109
3110 if (!grp)
3111 return -EINVAL;
3112 tipc_group_self(grp, &seq, &scope);
3113 tipc_group_delete(net, grp);
3114 tsk->group = NULL;
3115 tipc_sk_withdraw(tsk, scope, &seq);
3116 return 0;
3117}
3118
Jon Paul Maloy808d90f2014-08-22 18:09:19 -04003119/**
Ying Xue247f0f32014-02-18 16:06:46 +08003120 * tipc_setsockopt - set socket option
Per Lidenb97bf3f2006-01-02 19:04:38 +01003121 * @sock: socket structure
3122 * @lvl: option level
3123 * @opt: option identifier
3124 * @ov: pointer to new option value
3125 * @ol: length of option value
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003126 *
3127 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
Per Lidenb97bf3f2006-01-02 19:04:38 +01003128 * (to ease compatibility).
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003129 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08003130 * Return: 0 on success, errno otherwise
Per Lidenb97bf3f2006-01-02 19:04:38 +01003131 */
Ying Xue247f0f32014-02-18 16:06:46 +08003132static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
Christoph Hellwiga7b75c52020-07-23 08:09:07 +02003133 sockptr_t ov, unsigned int ol)
Per Lidenb97bf3f2006-01-02 19:04:38 +01003134{
Allan Stephens0c3141e2008-04-15 00:22:02 -07003135 struct sock *sk = sock->sk;
Jon Paul Maloy58ed9442014-03-12 11:31:12 -04003136 struct tipc_sock *tsk = tipc_sk(sk);
Jon Maloy75da2162017-10-13 11:04:23 +02003137 struct tipc_group_req mreq;
Jon Paul Maloy01fd12b2017-01-18 13:50:53 -05003138 u32 value = 0;
Dan Carpentera08ef472017-01-24 12:49:35 +03003139 int res = 0;
Per Lidenb97bf3f2006-01-02 19:04:38 +01003140
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003141 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3142 return 0;
Per Lidenb97bf3f2006-01-02 19:04:38 +01003143 if (lvl != SOL_TIPC)
3144 return -ENOPROTOOPT;
Jon Paul Maloy01fd12b2017-01-18 13:50:53 -05003145
3146 switch (opt) {
3147 case TIPC_IMPORTANCE:
3148 case TIPC_SRC_DROPPABLE:
3149 case TIPC_DEST_DROPPABLE:
3150 case TIPC_CONN_TIMEOUT:
Jon Maloyc0bceb92019-10-30 14:00:41 +01003151 case TIPC_NODELAY:
Jon Paul Maloy01fd12b2017-01-18 13:50:53 -05003152 if (ol < sizeof(value))
3153 return -EINVAL;
Christoph Hellwiga7b75c52020-07-23 08:09:07 +02003154 if (copy_from_sockptr(&value, ov, sizeof(u32)))
Jon Maloy75da2162017-10-13 11:04:23 +02003155 return -EFAULT;
3156 break;
3157 case TIPC_GROUP_JOIN:
3158 if (ol < sizeof(mreq))
3159 return -EINVAL;
Christoph Hellwiga7b75c52020-07-23 08:09:07 +02003160 if (copy_from_sockptr(&mreq, ov, sizeof(mreq)))
Jon Maloy75da2162017-10-13 11:04:23 +02003161 return -EFAULT;
Jon Paul Maloy01fd12b2017-01-18 13:50:53 -05003162 break;
3163 default:
Christoph Hellwiga7b75c52020-07-23 08:09:07 +02003164 if (!sockptr_is_null(ov) || ol)
Jon Paul Maloy01fd12b2017-01-18 13:50:53 -05003165 return -EINVAL;
3166 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01003167
Allan Stephens0c3141e2008-04-15 00:22:02 -07003168 lock_sock(sk);
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003169
Per Lidenb97bf3f2006-01-02 19:04:38 +01003170 switch (opt) {
3171 case TIPC_IMPORTANCE:
Christoph Hellwig095ae612020-05-28 07:12:36 +02003172 res = tsk_set_importance(sk, value);
Per Lidenb97bf3f2006-01-02 19:04:38 +01003173 break;
3174 case TIPC_SRC_DROPPABLE:
3175 if (sock->type != SOCK_STREAM)
Jon Paul Maloy301bae52014-08-22 18:09:20 -04003176 tsk_set_unreliable(tsk, value);
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003177 else
Per Lidenb97bf3f2006-01-02 19:04:38 +01003178 res = -ENOPROTOOPT;
3179 break;
3180 case TIPC_DEST_DROPPABLE:
Jon Paul Maloy301bae52014-08-22 18:09:20 -04003181 tsk_set_unreturnable(tsk, value);
Per Lidenb97bf3f2006-01-02 19:04:38 +01003182 break;
3183 case TIPC_CONN_TIMEOUT:
Allan Stephensa0f40f02011-05-26 13:44:34 -04003184 tipc_sk(sk)->conn_timeout = value;
Per Lidenb97bf3f2006-01-02 19:04:38 +01003185 break;
Jon Paul Maloy01fd12b2017-01-18 13:50:53 -05003186 case TIPC_MCAST_BROADCAST:
3187 tsk->mc_method.rcast = false;
3188 tsk->mc_method.mandatory = true;
3189 break;
3190 case TIPC_MCAST_REPLICAST:
3191 tsk->mc_method.rcast = true;
3192 tsk->mc_method.mandatory = true;
3193 break;
Jon Maloy75da2162017-10-13 11:04:23 +02003194 case TIPC_GROUP_JOIN:
3195 res = tipc_sk_join(tsk, &mreq);
3196 break;
3197 case TIPC_GROUP_LEAVE:
3198 res = tipc_sk_leave(tsk);
3199 break;
Jon Maloyc0bceb92019-10-30 14:00:41 +01003200 case TIPC_NODELAY:
3201 tsk->nodelay = !!value;
3202 tsk_set_nagle(tsk);
3203 break;
Per Lidenb97bf3f2006-01-02 19:04:38 +01003204 default:
3205 res = -EINVAL;
3206 }
3207
Allan Stephens0c3141e2008-04-15 00:22:02 -07003208 release_sock(sk);
3209
Per Lidenb97bf3f2006-01-02 19:04:38 +01003210 return res;
3211}
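An illustrative userspace use of a few of the options handled above, assuming a SOCK_RDM socket; the group type/instance (4711/1) and the 5 s timeout are arbitrary example values.

#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int example_setsockopt(int sd)
{
	struct tipc_group_req mreq;
	__u32 imp = TIPC_HIGH_IMPORTANCE;
	__u32 conn_timeout_ms = 5000;

	if (setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &imp, sizeof(imp)) < 0)
		return -1;
	if (setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT,
		       &conn_timeout_ms, sizeof(conn_timeout_ms)) < 0)
		return -1;
	memset(&mreq, 0, sizeof(mreq));
	mreq.type = 4711;			/* example group type */
	mreq.instance = 1;			/* this member's instance */
	mreq.scope = TIPC_CLUSTER_SCOPE;
	return setsockopt(sd, SOL_TIPC, TIPC_GROUP_JOIN, &mreq, sizeof(mreq));
}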
3212
3213/**
Ying Xue247f0f32014-02-18 16:06:46 +08003214 * tipc_getsockopt - get socket option
Per Lidenb97bf3f2006-01-02 19:04:38 +01003215 * @sock: socket structure
3216 * @lvl: option level
3217 * @opt: option identifier
3218 * @ov: receptacle for option value
3219 * @ol: receptacle for length of option value
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003220 *
3221 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
Per Lidenb97bf3f2006-01-02 19:04:38 +01003222 * (to ease compatibility).
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003223 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08003224 * Return: 0 on success, errno otherwise
Per Lidenb97bf3f2006-01-02 19:04:38 +01003225 */
Ying Xue247f0f32014-02-18 16:06:46 +08003226static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
3227 char __user *ov, int __user *ol)
Per Lidenb97bf3f2006-01-02 19:04:38 +01003228{
Allan Stephens0c3141e2008-04-15 00:22:02 -07003229 struct sock *sk = sock->sk;
Jon Paul Maloy58ed9442014-03-12 11:31:12 -04003230 struct tipc_sock *tsk = tipc_sk(sk);
Jon Maloyb6f88d92020-11-25 13:29:15 -05003231 struct tipc_service_range seq;
Jon Maloy75da2162017-10-13 11:04:23 +02003232 int len, scope;
Per Lidenb97bf3f2006-01-02 19:04:38 +01003233 u32 value;
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003234 int res;
Per Lidenb97bf3f2006-01-02 19:04:38 +01003235
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003236 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
3237 return put_user(0, ol);
Per Lidenb97bf3f2006-01-02 19:04:38 +01003238 if (lvl != SOL_TIPC)
3239 return -ENOPROTOOPT;
Allan Stephens2db99832010-12-31 18:59:33 +00003240 res = get_user(len, ol);
3241 if (res)
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003242 return res;
Per Lidenb97bf3f2006-01-02 19:04:38 +01003243
Allan Stephens0c3141e2008-04-15 00:22:02 -07003244 lock_sock(sk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01003245
3246 switch (opt) {
3247 case TIPC_IMPORTANCE:
Jon Paul Maloy301bae52014-08-22 18:09:20 -04003248 value = tsk_importance(tsk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01003249 break;
3250 case TIPC_SRC_DROPPABLE:
Jon Paul Maloy301bae52014-08-22 18:09:20 -04003251 value = tsk_unreliable(tsk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01003252 break;
3253 case TIPC_DEST_DROPPABLE:
Jon Paul Maloy301bae52014-08-22 18:09:20 -04003254 value = tsk_unreturnable(tsk);
Per Lidenb97bf3f2006-01-02 19:04:38 +01003255 break;
3256 case TIPC_CONN_TIMEOUT:
Jon Paul Maloy301bae52014-08-22 18:09:20 -04003257 value = tsk->conn_timeout;
Allan Stephens0c3141e2008-04-15 00:22:02 -07003258 /* no need to set "res", since already 0 at this point */
Per Lidenb97bf3f2006-01-02 19:04:38 +01003259 break;
Allan Stephens0e659672010-12-31 18:59:32 +00003260 case TIPC_NODE_RECVQ_DEPTH:
Ying Xue9da3d472012-11-27 06:15:27 -05003261 value = 0; /* was tipc_queue_size, now obsolete */
oscar.medina@motorola.com66506132009-06-30 03:25:39 +00003262 break;
Allan Stephens0e659672010-12-31 18:59:32 +00003263 case TIPC_SOCK_RECVQ_DEPTH:
oscar.medina@motorola.com66506132009-06-30 03:25:39 +00003264 value = skb_queue_len(&sk->sk_receive_queue);
3265 break;
Tung Nguyen42e54252019-04-18 21:02:19 +07003266 case TIPC_SOCK_RECVQ_USED:
3267 value = sk_rmem_alloc_get(sk);
3268 break;
Jon Maloy75da2162017-10-13 11:04:23 +02003269 case TIPC_GROUP_JOIN:
3270 seq.type = 0;
3271 if (tsk->group)
3272 tipc_group_self(tsk->group, &seq, &scope);
3273 value = seq.type;
3274 break;
Per Lidenb97bf3f2006-01-02 19:04:38 +01003275 default:
3276 res = -EINVAL;
3277 }
3278
Allan Stephens0c3141e2008-04-15 00:22:02 -07003279 release_sock(sk);
3280
Paul Gortmaker25860c32010-12-31 18:59:31 +00003281 if (res)
3282 return res; /* "get" failed */
Per Lidenb97bf3f2006-01-02 19:04:38 +01003283
Paul Gortmaker25860c32010-12-31 18:59:31 +00003284 if (len < sizeof(value))
3285 return -EINVAL;
3286
3287 if (copy_to_user(ov, &value, sizeof(value)))
3288 return -EFAULT;
3289
3290 return put_user(sizeof(value), ol);
Per Lidenb97bf3f2006-01-02 19:04:38 +01003291}
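
/*
 * Editor's sketch (not part of the kernel source): a minimal user-space
 * illustration of the SOL_TIPC options served by tipc_getsockopt() above.
 * It assumes TIPC_IMPORTANCE, TIPC_HIGH_IMPORTANCE, TIPC_CONN_TIMEOUT and
 * TIPC_SOCK_RECVQ_DEPTH are exposed by <linux/tipc.h> as on current
 * kernels; error handling is deliberately minimal.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>
#include <linux/tipc.h>

static void show_tipc_sock_opts(void)
{
	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);
	unsigned int val = TIPC_HIGH_IMPORTANCE;
	socklen_t len = sizeof(val);

	if (sd < 0)
		return;

	/* message importance is read back exactly as it was set */
	setsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &val, sizeof(val));
	getsockopt(sd, SOL_TIPC, TIPC_IMPORTANCE, &val, &len);
	printf("importance:      %u\n", val);

	/* connect timeout is reported in milliseconds */
	len = sizeof(val);
	getsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &val, &len);
	printf("connect timeout: %u ms\n", val);

	/* number of buffers currently queued on the receive queue */
	len = sizeof(val);
	getsockopt(sd, SOL_TIPC, TIPC_SOCK_RECVQ_DEPTH, &val, &len);
	printf("rcvq depth:      %u\n", val);

	close(sd);
}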
3292
Ying Xuef2f98002015-01-09 15:27:05 +08003293static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
Erik Hugne78acb1f2014-04-24 16:26:47 +02003294{
Jon Maloy3e5cf362018-04-25 19:29:36 +02003295 struct net *net = sock_net(sock->sk);
3296 struct tipc_sioc_nodeid_req nr = {0};
Erik Hugne78acb1f2014-04-24 16:26:47 +02003297 struct tipc_sioc_ln_req lnr;
3298 void __user *argp = (void __user *)arg;
3299
3300 switch (cmd) {
3301 case SIOCGETLINKNAME:
3302 if (copy_from_user(&lnr, argp, sizeof(lnr)))
3303 return -EFAULT;
Jon Maloy3e5cf362018-04-25 19:29:36 +02003304 if (!tipc_node_get_linkname(net,
Ying Xuef2f98002015-01-09 15:27:05 +08003305 lnr.bearer_id & 0xffff, lnr.peer,
Erik Hugne78acb1f2014-04-24 16:26:47 +02003306 lnr.linkname, TIPC_MAX_LINK_NAME)) {
3307 if (copy_to_user(argp, &lnr, sizeof(lnr)))
3308 return -EFAULT;
3309 return 0;
3310 }
3311 return -EADDRNOTAVAIL;
Jon Maloy3e5cf362018-04-25 19:29:36 +02003312 case SIOCGETNODEID:
3313 if (copy_from_user(&nr, argp, sizeof(nr)))
3314 return -EFAULT;
3315 if (!tipc_node_get_id(net, nr.peer, nr.node_id))
3316 return -EADDRNOTAVAIL;
3317 if (copy_to_user(argp, &nr, sizeof(nr)))
3318 return -EFAULT;
3319 return 0;
Erik Hugne78acb1f2014-04-24 16:26:47 +02003320 default:
3321 return -ENOIOCTLCMD;
3322 }
3323}
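
/*
 * Editor's sketch (not part of the kernel source): resolving a peer's
 * 128-bit node identity from user space through the SIOCGETNODEID ioctl
 * handled above. struct tipc_sioc_nodeid_req and TIPC_NODEID_LEN are
 * assumed to come from <linux/tipc.h> as on recent kernels; printing the
 * identity as text only makes sense when it is a printable string.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/tipc.h>

static int print_peer_node_id(int sd, unsigned int peer_addr)
{
	struct tipc_sioc_nodeid_req req;

	memset(&req, 0, sizeof(req));
	req.peer = peer_addr;		/* 32-bit node hash to translate */

	if (ioctl(sd, SIOCGETNODEID, &req) < 0)
		return -1;		/* peer unknown or unreachable */

	printf("node id of %#x: %.*s\n", peer_addr,
	       TIPC_NODEID_LEN, req.node_id);
	return 0;
}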
3324
Erik Hugne70b03752017-03-29 11:22:16 +02003325static int tipc_socketpair(struct socket *sock1, struct socket *sock2)
3326{
3327 struct tipc_sock *tsk2 = tipc_sk(sock2->sk);
3328 struct tipc_sock *tsk1 = tipc_sk(sock1->sk);
Erik Hugne66bc1e82017-03-29 11:22:17 +02003329 u32 onode = tipc_own_addr(sock_net(sock1->sk));
Erik Hugne70b03752017-03-29 11:22:16 +02003330
Erik Hugne66bc1e82017-03-29 11:22:17 +02003331 tsk1->peer.family = AF_TIPC;
Jon Maloyb6f88d92020-11-25 13:29:15 -05003332 tsk1->peer.addrtype = TIPC_SOCKET_ADDR;
Erik Hugne66bc1e82017-03-29 11:22:17 +02003333 tsk1->peer.scope = TIPC_NODE_SCOPE;
3334 tsk1->peer.addr.id.ref = tsk2->portid;
3335 tsk1->peer.addr.id.node = onode;
3336 tsk2->peer.family = AF_TIPC;
Jon Maloyb6f88d92020-11-25 13:29:15 -05003337 tsk2->peer.addrtype = TIPC_SOCKET_ADDR;
Erik Hugne66bc1e82017-03-29 11:22:17 +02003338 tsk2->peer.scope = TIPC_NODE_SCOPE;
3339 tsk2->peer.addr.id.ref = tsk1->portid;
3340 tsk2->peer.addr.id.node = onode;
3341
3342 tipc_sk_finish_conn(tsk1, tsk2->portid, onode);
3343 tipc_sk_finish_conn(tsk2, tsk1->portid, onode);
Erik Hugne70b03752017-03-29 11:22:16 +02003344 return 0;
3345}
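
/*
 * Editor's sketch (not part of the kernel source): tipc_socketpair() above
 * is what backs socketpair(2) for AF_TIPC; the two returned sockets come
 * back pre-connected to each other on the local node. A minimal round trip:
 */
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int tipc_socketpair_demo(void)
{
	char buf[8] = "";
	int sv[2];

	if (socketpair(AF_TIPC, SOCK_SEQPACKET, 0, sv) < 0)
		return -1;

	/* each end's peer is wired to the other end's port id */
	send(sv[0], "ping", 5, 0);
	recv(sv[1], buf, sizeof(buf), 0);

	close(sv[0]);
	close(sv[1]);
	return strcmp(buf, "ping");	/* 0 on success */
}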
3346
Ben Hutchingsae86b9e2012-07-10 10:55:35 +00003347/* Protocol switches for the various types of TIPC sockets */
3348
Florian Westphalbca65ea2008-02-07 18:18:01 -08003349static const struct proto_ops msg_ops = {
Allan Stephens0e659672010-12-31 18:59:32 +00003350 .owner = THIS_MODULE,
Per Lidenb97bf3f2006-01-02 19:04:38 +01003351 .family = AF_TIPC,
Ying Xue247f0f32014-02-18 16:06:46 +08003352 .release = tipc_release,
3353 .bind = tipc_bind,
3354 .connect = tipc_connect,
Erik Hugne66bc1e82017-03-29 11:22:17 +02003355 .socketpair = tipc_socketpair,
Ying Xue245f3d32011-07-06 06:01:13 -04003356 .accept = sock_no_accept,
Ying Xue247f0f32014-02-18 16:06:46 +08003357 .getname = tipc_getname,
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07003358 .poll = tipc_poll,
Erik Hugne78acb1f2014-04-24 16:26:47 +02003359 .ioctl = tipc_ioctl,
Ying Xue245f3d32011-07-06 06:01:13 -04003360 .listen = sock_no_listen,
Ying Xue247f0f32014-02-18 16:06:46 +08003361 .shutdown = tipc_shutdown,
3362 .setsockopt = tipc_setsockopt,
3363 .getsockopt = tipc_getsockopt,
3364 .sendmsg = tipc_sendmsg,
3365 .recvmsg = tipc_recvmsg,
YOSHIFUJI Hideaki82387452007-07-19 10:44:56 +09003366 .mmap = sock_no_mmap,
3367 .sendpage = sock_no_sendpage
Per Lidenb97bf3f2006-01-02 19:04:38 +01003368};
3369
Florian Westphalbca65ea2008-02-07 18:18:01 -08003370static const struct proto_ops packet_ops = {
Allan Stephens0e659672010-12-31 18:59:32 +00003371 .owner = THIS_MODULE,
Per Lidenb97bf3f2006-01-02 19:04:38 +01003372 .family = AF_TIPC,
Ying Xue247f0f32014-02-18 16:06:46 +08003373 .release = tipc_release,
3374 .bind = tipc_bind,
3375 .connect = tipc_connect,
Erik Hugne70b03752017-03-29 11:22:16 +02003376 .socketpair = tipc_socketpair,
Ying Xue247f0f32014-02-18 16:06:46 +08003377 .accept = tipc_accept,
3378 .getname = tipc_getname,
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07003379 .poll = tipc_poll,
Erik Hugne78acb1f2014-04-24 16:26:47 +02003380 .ioctl = tipc_ioctl,
Ying Xue247f0f32014-02-18 16:06:46 +08003381 .listen = tipc_listen,
3382 .shutdown = tipc_shutdown,
3383 .setsockopt = tipc_setsockopt,
3384 .getsockopt = tipc_getsockopt,
3385 .sendmsg = tipc_send_packet,
3386 .recvmsg = tipc_recvmsg,
YOSHIFUJI Hideaki82387452007-07-19 10:44:56 +09003387 .mmap = sock_no_mmap,
3388 .sendpage = sock_no_sendpage
Per Lidenb97bf3f2006-01-02 19:04:38 +01003389};
3390
Florian Westphalbca65ea2008-02-07 18:18:01 -08003391static const struct proto_ops stream_ops = {
Allan Stephens0e659672010-12-31 18:59:32 +00003392 .owner = THIS_MODULE,
Per Lidenb97bf3f2006-01-02 19:04:38 +01003393 .family = AF_TIPC,
Ying Xue247f0f32014-02-18 16:06:46 +08003394 .release = tipc_release,
3395 .bind = tipc_bind,
3396 .connect = tipc_connect,
Erik Hugne70b03752017-03-29 11:22:16 +02003397 .socketpair = tipc_socketpair,
Ying Xue247f0f32014-02-18 16:06:46 +08003398 .accept = tipc_accept,
3399 .getname = tipc_getname,
Linus Torvaldsa11e1d42018-06-28 09:43:44 -07003400 .poll = tipc_poll,
Erik Hugne78acb1f2014-04-24 16:26:47 +02003401 .ioctl = tipc_ioctl,
Ying Xue247f0f32014-02-18 16:06:46 +08003402 .listen = tipc_listen,
3403 .shutdown = tipc_shutdown,
3404 .setsockopt = tipc_setsockopt,
3405 .getsockopt = tipc_getsockopt,
Jon Paul Maloy365ad352017-01-03 10:55:11 -05003406 .sendmsg = tipc_sendstream,
Jon Paul Maloyec8a09f2017-05-02 18:16:54 +02003407 .recvmsg = tipc_recvstream,
YOSHIFUJI Hideaki82387452007-07-19 10:44:56 +09003408 .mmap = sock_no_mmap,
3409 .sendpage = sock_no_sendpage
Per Lidenb97bf3f2006-01-02 19:04:38 +01003410};
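
/*
 * Editor's sketch (not part of the kernel source): which of the three
 * proto_ops tables above a socket ends up with is decided by the type
 * requested from user space - SOCK_RDM and SOCK_DGRAM use msg_ops,
 * SOCK_SEQPACKET uses packet_ops and SOCK_STREAM uses stream_ops (the
 * selection itself is made in tipc_sk_create(), earlier in this file).
 */
#include <sys/socket.h>
#include <unistd.h>

static void tipc_socket_type_demo(void)
{
	int rdm = socket(AF_TIPC, SOCK_RDM, 0);		/* datagrams, msg_ops */
	int pkt = socket(AF_TIPC, SOCK_SEQPACKET, 0);	/* messages, packet_ops */
	int str = socket(AF_TIPC, SOCK_STREAM, 0);	/* byte stream, stream_ops */

	close(str);
	close(pkt);
	close(rdm);
}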
3411
Florian Westphalbca65ea2008-02-07 18:18:01 -08003412static const struct net_proto_family tipc_family_ops = {
Allan Stephens0e659672010-12-31 18:59:32 +00003413 .owner = THIS_MODULE,
Per Lidenb97bf3f2006-01-02 19:04:38 +01003414 .family = AF_TIPC,
Ying Xuec5fa7b32013-06-17 10:54:39 -04003415 .create = tipc_sk_create
Per Lidenb97bf3f2006-01-02 19:04:38 +01003416};
3417
3418static struct proto tipc_proto = {
3419 .name = "TIPC",
3420 .owner = THIS_MODULE,
Ying Xuecc79dd12013-06-17 10:54:37 -04003421 .obj_size = sizeof(struct tipc_sock),
3422 .sysctl_rmem = sysctl_tipc_rmem
Per Lidenb97bf3f2006-01-02 19:04:38 +01003423};
3424
3425/**
Per Liden4323add2006-01-18 00:38:21 +01003426 * tipc_socket_init - initialize TIPC socket interface
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003427 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08003428 * Return: 0 on success, errno otherwise
Per Lidenb97bf3f2006-01-02 19:04:38 +01003429 */
Per Liden4323add2006-01-18 00:38:21 +01003430int tipc_socket_init(void)
Per Lidenb97bf3f2006-01-02 19:04:38 +01003431{
3432 int res;
3433
YOSHIFUJI Hideakic4307282007-02-09 23:25:21 +09003434 res = proto_register(&tipc_proto, 1);
Per Lidenb97bf3f2006-01-02 19:04:38 +01003435 if (res) {
Erik Hugne2cf8aa12012-06-29 00:16:37 -04003436 pr_err("Failed to register TIPC protocol type\n");
Per Lidenb97bf3f2006-01-02 19:04:38 +01003437 goto out;
3438 }
3439
3440 res = sock_register(&tipc_family_ops);
3441 if (res) {
Erik Hugne2cf8aa12012-06-29 00:16:37 -04003442 pr_err("Failed to register TIPC socket type\n");
Per Lidenb97bf3f2006-01-02 19:04:38 +01003443 proto_unregister(&tipc_proto);
3444 goto out;
3445 }
Per Lidenb97bf3f2006-01-02 19:04:38 +01003446 out:
3447 return res;
3448}
3449
3450/**
Per Liden4323add2006-01-18 00:38:21 +01003451 * tipc_socket_stop - stop TIPC socket interface
Per Lidenb97bf3f2006-01-02 19:04:38 +01003452 */
Per Liden4323add2006-01-18 00:38:21 +01003453void tipc_socket_stop(void)
Per Lidenb97bf3f2006-01-02 19:04:38 +01003454{
Per Lidenb97bf3f2006-01-02 19:04:38 +01003455 sock_unregister(tipc_family_ops.family);
3456 proto_unregister(&tipc_proto);
3457}
Richard Alpe34b78a122014-11-20 10:29:10 +01003458
3459/* Caller should hold socket lock for the passed tipc socket. */
Richard Alped8182802014-11-24 11:10:29 +01003460static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
Richard Alpe34b78a122014-11-20 10:29:10 +01003461{
3462 u32 peer_node;
3463 u32 peer_port;
3464 struct nlattr *nest;
3465
3466 peer_node = tsk_peer_node(tsk);
3467 peer_port = tsk_peer_port(tsk);
3468
Michal Kubecekae0be8d2019-04-26 11:13:06 +02003469 nest = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_CON);
Kangjie Lu517ccc22019-03-16 16:46:05 -05003470 if (!nest)
3471 return -EMSGSIZE;
Richard Alpe34b78a122014-11-20 10:29:10 +01003472
3473 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
3474 goto msg_full;
3475 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
3476 goto msg_full;
3477
3478 if (tsk->conn_type != 0) {
3479 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
3480 goto msg_full;
3481 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
3482 goto msg_full;
3483 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
3484 goto msg_full;
3485 }
3486 nla_nest_end(skb, nest);
3487
3488 return 0;
3489
3490msg_full:
3491 nla_nest_cancel(skb, nest);
3492
3493 return -EMSGSIZE;
3494}
3495
GhantaKrishnamurthy MohanKrishnadfde3312018-03-21 14:37:43 +01003496static int __tipc_nl_add_sk_info(struct sk_buff *skb, struct tipc_sock
3497 *tsk)
3498{
3499 struct net *net = sock_net(skb->sk);
GhantaKrishnamurthy MohanKrishnadfde3312018-03-21 14:37:43 +01003500 struct sock *sk = &tsk->sk;
3501
3502 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid) ||
Jon Maloy23fd3ea2018-03-22 20:42:49 +01003503 nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr(net)))
GhantaKrishnamurthy MohanKrishnadfde3312018-03-21 14:37:43 +01003504 return -EMSGSIZE;
3505
3506 if (tipc_sk_connected(sk)) {
3507 if (__tipc_nl_add_sk_con(skb, tsk))
3508 return -EMSGSIZE;
3509 } else if (!list_empty(&tsk->publications)) {
3510 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
3511 return -EMSGSIZE;
3512 }
3513 return 0;
3514}
3515
Richard Alpe34b78a122014-11-20 10:29:10 +01003516/* Caller should hold socket lock for the passed tipc socket. */
Richard Alped8182802014-11-24 11:10:29 +01003517static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
3518 struct tipc_sock *tsk)
Richard Alpe34b78a122014-11-20 10:29:10 +01003519{
Richard Alpe34b78a122014-11-20 10:29:10 +01003520 struct nlattr *attrs;
GhantaKrishnamurthy MohanKrishnadfde3312018-03-21 14:37:43 +01003521 void *hdr;
Richard Alpe34b78a122014-11-20 10:29:10 +01003522
3523 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
Richard Alpebfb3e5d2015-02-09 09:50:03 +01003524 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
Richard Alpe34b78a122014-11-20 10:29:10 +01003525 if (!hdr)
3526 goto msg_cancel;
3527
Michal Kubecekae0be8d2019-04-26 11:13:06 +02003528 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
Richard Alpe34b78a122014-11-20 10:29:10 +01003529 if (!attrs)
3530 goto genlmsg_cancel;
GhantaKrishnamurthy MohanKrishnadfde3312018-03-21 14:37:43 +01003531
3532 if (__tipc_nl_add_sk_info(skb, tsk))
Richard Alpe34b78a122014-11-20 10:29:10 +01003533 goto attr_msg_cancel;
3534
Richard Alpe34b78a122014-11-20 10:29:10 +01003535 nla_nest_end(skb, attrs);
3536 genlmsg_end(skb, hdr);
3537
3538 return 0;
3539
3540attr_msg_cancel:
3541 nla_nest_cancel(skb, attrs);
3542genlmsg_cancel:
3543 genlmsg_cancel(skb, hdr);
3544msg_cancel:
3545 return -EMSGSIZE;
3546}
3547
GhantaKrishnamurthy MohanKrishnac30b70d2018-03-21 14:37:44 +01003548int tipc_nl_sk_walk(struct sk_buff *skb, struct netlink_callback *cb,
3549 int (*skb_handler)(struct sk_buff *skb,
3550 struct netlink_callback *cb,
3551 struct tipc_sock *tsk))
Richard Alpe34b78a122014-11-20 10:29:10 +01003552{
Cong Wang8f5c5fc2018-09-04 14:54:55 -07003553 struct rhashtable_iter *iter = (void *)cb->args[4];
GhantaKrishnamurthy MohanKrishnadfde3312018-03-21 14:37:43 +01003554 struct tipc_sock *tsk;
3555 int err;
Richard Alpe34b78a122014-11-20 10:29:10 +01003556
Cong Wang9a07efa2018-08-24 12:28:06 -07003557 rhashtable_walk_start(iter);
3558 while ((tsk = rhashtable_walk_next(iter)) != NULL) {
3559 if (IS_ERR(tsk)) {
3560 err = PTR_ERR(tsk);
3561 if (err == -EAGAIN) {
3562 err = 0;
Richard Alped6e164e2015-01-16 12:30:40 +01003563 continue;
3564 }
Cong Wang9a07efa2018-08-24 12:28:06 -07003565 break;
Ying Xue07f6c4b2015-01-07 13:41:58 +08003566 }
Richard Alpe34b78a122014-11-20 10:29:10 +01003567
Cong Wang9a07efa2018-08-24 12:28:06 -07003568 sock_hold(&tsk->sk);
3569 rhashtable_walk_stop(iter);
3570 lock_sock(&tsk->sk);
3571 err = skb_handler(skb, cb, tsk);
3572 if (err) {
3573 release_sock(&tsk->sk);
3574 sock_put(&tsk->sk);
3575 goto out;
3576 }
3577 release_sock(&tsk->sk);
3578 rhashtable_walk_start(iter);
3579 sock_put(&tsk->sk);
3580 }
3581 rhashtable_walk_stop(iter);
3582out:
Richard Alpe34b78a122014-11-20 10:29:10 +01003583 return skb->len;
3584}
GhantaKrishnamurthy MohanKrishnac30b70d2018-03-21 14:37:44 +01003585EXPORT_SYMBOL(tipc_nl_sk_walk);
3586
Cong Wang9a07efa2018-08-24 12:28:06 -07003587int tipc_dump_start(struct netlink_callback *cb)
3588{
Cong Wang8f5c5fc2018-09-04 14:54:55 -07003589 return __tipc_dump_start(cb, sock_net(cb->skb->sk));
3590}
3591EXPORT_SYMBOL(tipc_dump_start);
3592
3593int __tipc_dump_start(struct netlink_callback *cb, struct net *net)
3594{
3595 /* tipc_nl_name_table_dump() uses cb->args[0...3]. */
3596 struct rhashtable_iter *iter = (void *)cb->args[4];
Cong Wang9a07efa2018-08-24 12:28:06 -07003597 struct tipc_net *tn = tipc_net(net);
3598
3599 if (!iter) {
3600 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3601 if (!iter)
3602 return -ENOMEM;
3603
Cong Wang8f5c5fc2018-09-04 14:54:55 -07003604 cb->args[4] = (long)iter;
Cong Wang9a07efa2018-08-24 12:28:06 -07003605 }
3606
3607 rhashtable_walk_enter(&tn->sk_rht, iter);
3608 return 0;
3609}
Cong Wang9a07efa2018-08-24 12:28:06 -07003610
3611int tipc_dump_done(struct netlink_callback *cb)
3612{
Cong Wang8f5c5fc2018-09-04 14:54:55 -07003613 struct rhashtable_iter *hti = (void *)cb->args[4];
Cong Wang9a07efa2018-08-24 12:28:06 -07003614
3615 rhashtable_walk_exit(hti);
3616 kfree(hti);
3617 return 0;
3618}
3619EXPORT_SYMBOL(tipc_dump_done);
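
/*
 * Editor's sketch (not part of the kernel source): tipc_dump_start(),
 * tipc_nl_sk_dump() and tipc_dump_done() are meant to be wired together
 * into a netlink dump operation, roughly as the TIPC genetlink code does
 * for TIPC_NL_SOCK_GET. The real ops table lives in net/tipc/netlink.c and
 * carries additional fields; this is only an abbreviated illustration.
 */
static const struct genl_ops tipc_sock_dump_op_sketch = {
	.cmd	= TIPC_NL_SOCK_GET,
	.start	= tipc_dump_start,	/* allocate the rhashtable iterator */
	.dumpit	= tipc_nl_sk_dump,	/* walk sockets via tipc_nl_sk_walk() */
	.done	= tipc_dump_done,	/* free the iterator */
};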
3620
Cong Wange41f0542018-04-06 18:54:52 -07003621int tipc_sk_fill_sock_diag(struct sk_buff *skb, struct netlink_callback *cb,
3622 struct tipc_sock *tsk, u32 sk_filter_state,
GhantaKrishnamurthy MohanKrishnac30b70d2018-03-21 14:37:44 +01003623 u64 (*tipc_diag_gen_cookie)(struct sock *sk))
3624{
3625 struct sock *sk = &tsk->sk;
3626 struct nlattr *attrs;
3627 struct nlattr *stat;
3628
3629	/* filter response w.r.t. sk_state */
3630 if (!(sk_filter_state & (1 << sk->sk_state)))
3631 return 0;
3632
Michal Kubecekae0be8d2019-04-26 11:13:06 +02003633 attrs = nla_nest_start_noflag(skb, TIPC_NLA_SOCK);
GhantaKrishnamurthy MohanKrishnac30b70d2018-03-21 14:37:44 +01003634 if (!attrs)
3635 goto msg_cancel;
3636
3637 if (__tipc_nl_add_sk_info(skb, tsk))
3638 goto attr_msg_cancel;
3639
3640 if (nla_put_u32(skb, TIPC_NLA_SOCK_TYPE, (u32)sk->sk_type) ||
3641 nla_put_u32(skb, TIPC_NLA_SOCK_TIPC_STATE, (u32)sk->sk_state) ||
3642 nla_put_u32(skb, TIPC_NLA_SOCK_INO, sock_i_ino(sk)) ||
3643 nla_put_u32(skb, TIPC_NLA_SOCK_UID,
Cong Wange41f0542018-04-06 18:54:52 -07003644 from_kuid_munged(sk_user_ns(NETLINK_CB(cb->skb).sk),
GhantaKrishnamurthy MohanKrishna4b2e6872018-04-04 14:49:47 +02003645 sock_i_uid(sk))) ||
GhantaKrishnamurthy MohanKrishnac30b70d2018-03-21 14:37:44 +01003646 nla_put_u64_64bit(skb, TIPC_NLA_SOCK_COOKIE,
3647 tipc_diag_gen_cookie(sk),
3648 TIPC_NLA_SOCK_PAD))
3649 goto attr_msg_cancel;
3650
Michal Kubecekae0be8d2019-04-26 11:13:06 +02003651 stat = nla_nest_start_noflag(skb, TIPC_NLA_SOCK_STAT);
GhantaKrishnamurthy MohanKrishnac30b70d2018-03-21 14:37:44 +01003652 if (!stat)
3653 goto attr_msg_cancel;
3654
3655 if (nla_put_u32(skb, TIPC_NLA_SOCK_STAT_RCVQ,
3656 skb_queue_len(&sk->sk_receive_queue)) ||
3657 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_SENDQ,
GhantaKrishnamurthy MohanKrishna872619d2018-03-21 14:37:45 +01003658 skb_queue_len(&sk->sk_write_queue)) ||
3659 nla_put_u32(skb, TIPC_NLA_SOCK_STAT_DROP,
3660 atomic_read(&sk->sk_drops)))
GhantaKrishnamurthy MohanKrishnac30b70d2018-03-21 14:37:44 +01003661 goto stat_msg_cancel;
3662
3663 if (tsk->cong_link_cnt &&
3664 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_LINK_CONG))
3665 goto stat_msg_cancel;
3666
3667 if (tsk_conn_cong(tsk) &&
3668 nla_put_flag(skb, TIPC_NLA_SOCK_STAT_CONN_CONG))
3669 goto stat_msg_cancel;
3670
3671 nla_nest_end(skb, stat);
GhantaKrishnamurthy MohanKrishnaa1be5a22018-06-29 13:26:18 +02003672
3673 if (tsk->group)
3674 if (tipc_group_fill_sock_diag(tsk->group, skb))
3675 goto stat_msg_cancel;
3676
GhantaKrishnamurthy MohanKrishnac30b70d2018-03-21 14:37:44 +01003677 nla_nest_end(skb, attrs);
3678
3679 return 0;
3680
3681stat_msg_cancel:
3682 nla_nest_cancel(skb, stat);
3683attr_msg_cancel:
3684 nla_nest_cancel(skb, attrs);
3685msg_cancel:
3686 return -EMSGSIZE;
3687}
3688EXPORT_SYMBOL(tipc_sk_fill_sock_diag);
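
/*
 * Editor's sketch (not part of the kernel source): tipc_sk_fill_sock_diag()
 * above answers SOCK_DIAG_BY_FAMILY dump requests. A minimal user-space
 * request over an AF_NETLINK/NETLINK_SOCK_DIAG socket could look like the
 * snippet below; the layout of struct tipc_sock_diag_req is assumed to
 * match <linux/tipc_sockets_diag.h> on recent kernels.
 */
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/sock_diag.h>
#include <linux/tipc_sockets_diag.h>

static int request_tipc_sock_diag(int nlsd)
{
	struct {
		struct nlmsghdr nlh;
		struct tipc_sock_diag_req req;
	} msg;

	memset(&msg, 0, sizeof(msg));
	msg.nlh.nlmsg_len = sizeof(msg);
	msg.nlh.nlmsg_type = SOCK_DIAG_BY_FAMILY;
	msg.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;
	msg.req.sdiag_family = AF_TIPC;
	msg.req.tidiag_states = ~0U;	/* report sockets in every state */

	/* nlsd is an AF_NETLINK, SOCK_RAW, NETLINK_SOCK_DIAG socket */
	return send(nlsd, &msg, sizeof(msg), 0);
}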
Richard Alpe1a1a1432014-11-20 10:29:11 +01003689
GhantaKrishnamurthy MohanKrishnadfde3312018-03-21 14:37:43 +01003690int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
3691{
GhantaKrishnamurthy MohanKrishnac30b70d2018-03-21 14:37:44 +01003692 return tipc_nl_sk_walk(skb, cb, __tipc_nl_add_sk);
GhantaKrishnamurthy MohanKrishnadfde3312018-03-21 14:37:43 +01003693}
3694
Richard Alpe1a1a1432014-11-20 10:29:11 +01003695/* Caller should hold socket lock for the passed tipc socket. */
Richard Alped8182802014-11-24 11:10:29 +01003696static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
3697 struct netlink_callback *cb,
3698 struct publication *publ)
Richard Alpe1a1a1432014-11-20 10:29:11 +01003699{
3700 void *hdr;
3701 struct nlattr *attrs;
3702
3703 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
Richard Alpebfb3e5d2015-02-09 09:50:03 +01003704 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
Richard Alpe1a1a1432014-11-20 10:29:11 +01003705 if (!hdr)
3706 goto msg_cancel;
3707
Michal Kubecekae0be8d2019-04-26 11:13:06 +02003708 attrs = nla_nest_start_noflag(skb, TIPC_NLA_PUBL);
Richard Alpe1a1a1432014-11-20 10:29:11 +01003709 if (!attrs)
3710 goto genlmsg_cancel;
3711
3712 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
3713 goto attr_msg_cancel;
Jon Maloy998d3902021-03-16 22:06:08 -04003714 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->sr.type))
Richard Alpe1a1a1432014-11-20 10:29:11 +01003715 goto attr_msg_cancel;
Jon Maloy998d3902021-03-16 22:06:08 -04003716 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->sr.lower))
Richard Alpe1a1a1432014-11-20 10:29:11 +01003717 goto attr_msg_cancel;
Jon Maloy998d3902021-03-16 22:06:08 -04003718 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->sr.upper))
Richard Alpe1a1a1432014-11-20 10:29:11 +01003719 goto attr_msg_cancel;
3720
3721 nla_nest_end(skb, attrs);
3722 genlmsg_end(skb, hdr);
3723
3724 return 0;
3725
3726attr_msg_cancel:
3727 nla_nest_cancel(skb, attrs);
3728genlmsg_cancel:
3729 genlmsg_cancel(skb, hdr);
3730msg_cancel:
3731 return -EMSGSIZE;
3732}
3733
3734/* Caller should hold socket lock for the passed tipc socket. */
Richard Alped8182802014-11-24 11:10:29 +01003735static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
3736 struct netlink_callback *cb,
3737 struct tipc_sock *tsk, u32 *last_publ)
Richard Alpe1a1a1432014-11-20 10:29:11 +01003738{
3739 int err;
3740 struct publication *p;
3741
3742 if (*last_publ) {
Jon Maloye50e73e2018-03-15 16:48:55 +01003743 list_for_each_entry(p, &tsk->publications, binding_sock) {
Richard Alpe1a1a1432014-11-20 10:29:11 +01003744 if (p->key == *last_publ)
3745 break;
3746 }
3747 if (p->key != *last_publ) {
3748			/* We never set seq or call nl_dump_check_consistent(),
3749			 * so setting prev_seq here will cause the consistency
3750			 * check to fail in the netlink callback handler,
3751			 * resulting in the last NLMSG_DONE message having the
3752			 * NLM_F_DUMP_INTR flag set.
3753			 */
3754 cb->prev_seq = 1;
3755 *last_publ = 0;
3756 return -EPIPE;
3757 }
3758 } else {
3759 p = list_first_entry(&tsk->publications, struct publication,
Jon Maloye50e73e2018-03-15 16:48:55 +01003760 binding_sock);
Richard Alpe1a1a1432014-11-20 10:29:11 +01003761 }
3762
Jon Maloye50e73e2018-03-15 16:48:55 +01003763 list_for_each_entry_from(p, &tsk->publications, binding_sock) {
Richard Alpe1a1a1432014-11-20 10:29:11 +01003764 err = __tipc_nl_add_sk_publ(skb, cb, p);
3765 if (err) {
3766 *last_publ = p->key;
3767 return err;
3768 }
3769 }
3770 *last_publ = 0;
3771
3772 return 0;
3773}
3774
3775int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
3776{
3777 int err;
Ying Xue07f6c4b2015-01-07 13:41:58 +08003778 u32 tsk_portid = cb->args[0];
Richard Alpe1a1a1432014-11-20 10:29:11 +01003779 u32 last_publ = cb->args[1];
3780 u32 done = cb->args[2];
Ying Xuee05b31f2015-01-09 15:27:08 +08003781 struct net *net = sock_net(skb->sk);
Richard Alpe1a1a1432014-11-20 10:29:11 +01003782 struct tipc_sock *tsk;
3783
Ying Xue07f6c4b2015-01-07 13:41:58 +08003784 if (!tsk_portid) {
Jiri Pirko057af702019-10-05 20:04:39 +02003785 struct nlattr **attrs = genl_dumpit_info(cb)->attrs;
Richard Alpe1a1a1432014-11-20 10:29:11 +01003786 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
3787
Richard Alpe45e093a2016-05-16 11:14:54 +02003788 if (!attrs[TIPC_NLA_SOCK])
3789 return -EINVAL;
3790
Johannes Berg8cb08172019-04-26 14:07:28 +02003791 err = nla_parse_nested_deprecated(sock, TIPC_NLA_SOCK_MAX,
3792 attrs[TIPC_NLA_SOCK],
3793 tipc_nl_sock_policy, NULL);
Richard Alpe1a1a1432014-11-20 10:29:11 +01003794 if (err)
3795 return err;
3796
3797 if (!sock[TIPC_NLA_SOCK_REF])
3798 return -EINVAL;
3799
Ying Xue07f6c4b2015-01-07 13:41:58 +08003800 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
Richard Alpe1a1a1432014-11-20 10:29:11 +01003801 }
3802
3803 if (done)
3804 return 0;
3805
Ying Xuee05b31f2015-01-09 15:27:08 +08003806 tsk = tipc_sk_lookup(net, tsk_portid);
Richard Alpe1a1a1432014-11-20 10:29:11 +01003807 if (!tsk)
3808 return -EINVAL;
3809
3810 lock_sock(&tsk->sk);
3811 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
3812 if (!err)
3813 done = 1;
3814 release_sock(&tsk->sk);
Ying Xue07f6c4b2015-01-07 13:41:58 +08003815 sock_put(&tsk->sk);
Richard Alpe1a1a1432014-11-20 10:29:11 +01003816
Ying Xue07f6c4b2015-01-07 13:41:58 +08003817 cb->args[0] = tsk_portid;
Richard Alpe1a1a1432014-11-20 10:29:11 +01003818 cb->args[1] = last_publ;
3819 cb->args[2] = done;
3820
3821 return skb->len;
3822}
Tuong Lienb4b97712018-12-19 09:17:56 +07003823
Tuong Lien01e661e2018-12-19 09:17:58 +07003824/**
3825 * tipc_sk_filtering - check if a socket should be traced
3826 * @sk: the socket to be examined
Randy Dunlapf172f4b2020-11-29 10:32:49 -08003827 *
3828 * @sysctl_tipc_sk_filter is used as the socket tuple for filtering:
Randy Dunlap5fcb7d42020-11-29 10:32:50 -08003829 * (portid, sock type, name type, name lower, name upper)
Tuong Lien01e661e2018-12-19 09:17:58 +07003830 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08003831 * Return: true if the socket matches the filter tuple (a field value of
Tuong Lien01e661e2018-12-19 09:17:58 +07003832 * 0 matches anything), or when no tuple is set at all (all fields 0);
3833 * otherwise false
3834 */
3835bool tipc_sk_filtering(struct sock *sk)
3836{
3837 struct tipc_sock *tsk;
3838 struct publication *p;
3839 u32 _port, _sktype, _type, _lower, _upper;
3840 u32 type = 0, lower = 0, upper = 0;
3841
3842 if (!sk)
3843 return true;
3844
3845 tsk = tipc_sk(sk);
3846
3847 _port = sysctl_tipc_sk_filter[0];
3848 _sktype = sysctl_tipc_sk_filter[1];
3849 _type = sysctl_tipc_sk_filter[2];
3850 _lower = sysctl_tipc_sk_filter[3];
3851 _upper = sysctl_tipc_sk_filter[4];
3852
3853 if (!_port && !_sktype && !_type && !_lower && !_upper)
3854 return true;
3855
3856 if (_port)
3857 return (_port == tsk->portid);
3858
3859 if (_sktype && _sktype != sk->sk_type)
3860 return false;
3861
3862 if (tsk->published) {
3863 p = list_first_entry_or_null(&tsk->publications,
3864 struct publication, binding_sock);
3865 if (p) {
Jon Maloy998d3902021-03-16 22:06:08 -04003866 type = p->sr.type;
3867 lower = p->sr.lower;
3868 upper = p->sr.upper;
Tuong Lien01e661e2018-12-19 09:17:58 +07003869 }
3870 }
3871
3872 if (!tipc_sk_type_connectionless(sk)) {
3873 type = tsk->conn_type;
3874 lower = tsk->conn_instance;
3875 upper = tsk->conn_instance;
3876 }
3877
3878 if ((_type && _type != type) || (_lower && _lower != lower) ||
3879 (_upper && _upper != upper))
3880 return false;
3881
3882 return true;
3883}
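
/*
 * Editor's sketch (not part of the kernel source): the filter tuple that
 * tipc_sk_filtering() consults is normally set through a sysctl. The path
 * (/proc/sys/net/tipc/sk_filter) and the "port sktype type lower upper"
 * value format, with 0 meaning 'any', are assumptions based on the sysctl
 * definition in net/tipc/sysctl.c.
 */
#include <stdio.h>

static int tipc_trace_only_port(unsigned int portid)
{
	FILE *f = fopen("/proc/sys/net/tipc/sk_filter", "w");

	if (!f)
		return -1;

	/* a non-zero port id short-circuits the remaining tuple fields */
	fprintf(f, "%u 0 0 0 0\n", portid);
	return fclose(f);
}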
3884
Tuong Lienb4b97712018-12-19 09:17:56 +07003885u32 tipc_sock_get_portid(struct sock *sk)
3886{
3887 return (sk) ? (tipc_sk(sk))->portid : 0;
3888}
3889
3890/**
Tuong Lien01e661e2018-12-19 09:17:58 +07003891 * tipc_sk_overlimit1 - check if socket rx queue is about to be overloaded;
3892 * both the rcv and backlog queues are considered
3893 * @sk: tipc sk to be checked
3894 * @skb: tipc msg to be checked
3895 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08003896 * Return: true if the socket rx queue allocation is > 90%, otherwise false
Tuong Lien01e661e2018-12-19 09:17:58 +07003897 */
3898
3899bool tipc_sk_overlimit1(struct sock *sk, struct sk_buff *skb)
3900{
3901 atomic_t *dcnt = &tipc_sk(sk)->dupl_rcvcnt;
3902 unsigned int lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
3903 unsigned int qsize = sk->sk_backlog.len + sk_rmem_alloc_get(sk);
3904
3905 return (qsize > lim * 90 / 100);
3906}
3907
3908/**
3909 * tipc_sk_overlimit2 - check if socket rx queue is about to be overloaded;
3910 * only the rcv queue is considered
3911 * @sk: tipc sk to be checked
3912 * @skb: tipc msg to be checked
3913 *
Randy Dunlap637b77f2020-11-29 10:32:48 -08003914 * Return: true if the socket rx queue allocation is > 90%, otherwise false
Tuong Lien01e661e2018-12-19 09:17:58 +07003915 */
3916
3917bool tipc_sk_overlimit2(struct sock *sk, struct sk_buff *skb)
3918{
3919 unsigned int lim = rcvbuf_limit(sk, skb);
3920 unsigned int qsize = sk_rmem_alloc_get(sk);
3921
3922 return (qsize > lim * 90 / 100);
3923}
3924
3925/**
Tuong Lienb4b97712018-12-19 09:17:56 +07003926 * tipc_sk_dump - dump TIPC socket
3927 * @sk: tipc sk to be dumped
3928 * @dqueues: bitmask selecting which socket queues, if any, are dumped:
3929 * - TIPC_DUMP_NONE: don't dump socket queues
3930 * - TIPC_DUMP_SK_SNDQ: dump socket send queue
3931 * - TIPC_DUMP_SK_RCVQ: dump socket rcv queue
3932 * - TIPC_DUMP_SK_BKLGQ: dump socket backlog queue
3933 * - TIPC_DUMP_ALL: dump all the socket queues above
3934 * @buf: buffer into which the formatted dump data is written
3935 */
3936int tipc_sk_dump(struct sock *sk, u16 dqueues, char *buf)
3937{
3938 int i = 0;
3939 size_t sz = (dqueues) ? SK_LMAX : SK_LMIN;
3940 struct tipc_sock *tsk;
3941 struct publication *p;
3942 bool tsk_connected;
3943
3944 if (!sk) {
3945 i += scnprintf(buf, sz, "sk data: (null)\n");
3946 return i;
3947 }
3948
3949 tsk = tipc_sk(sk);
3950 tsk_connected = !tipc_sk_type_connectionless(sk);
3951
3952 i += scnprintf(buf, sz, "sk data: %u", sk->sk_type);
3953 i += scnprintf(buf + i, sz - i, " %d", sk->sk_state);
3954 i += scnprintf(buf + i, sz - i, " %x", tsk_own_node(tsk));
3955 i += scnprintf(buf + i, sz - i, " %u", tsk->portid);
3956 i += scnprintf(buf + i, sz - i, " | %u", tsk_connected);
3957 if (tsk_connected) {
3958 i += scnprintf(buf + i, sz - i, " %x", tsk_peer_node(tsk));
3959 i += scnprintf(buf + i, sz - i, " %u", tsk_peer_port(tsk));
3960 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_type);
3961 i += scnprintf(buf + i, sz - i, " %u", tsk->conn_instance);
3962 }
3963 i += scnprintf(buf + i, sz - i, " | %u", tsk->published);
3964 if (tsk->published) {
3965 p = list_first_entry_or_null(&tsk->publications,
3966 struct publication, binding_sock);
Jon Maloy998d3902021-03-16 22:06:08 -04003967 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.type : 0);
3968 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.lower : 0);
3969 i += scnprintf(buf + i, sz - i, " %u", (p) ? p->sr.upper : 0);
Tuong Lienb4b97712018-12-19 09:17:56 +07003970 }
3971 i += scnprintf(buf + i, sz - i, " | %u", tsk->snd_win);
3972 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_win);
3973 i += scnprintf(buf + i, sz - i, " %u", tsk->max_pkt);
3974 i += scnprintf(buf + i, sz - i, " %x", tsk->peer_caps);
3975 i += scnprintf(buf + i, sz - i, " %u", tsk->cong_link_cnt);
3976 i += scnprintf(buf + i, sz - i, " %u", tsk->snt_unacked);
3977 i += scnprintf(buf + i, sz - i, " %u", tsk->rcv_unacked);
3978 i += scnprintf(buf + i, sz - i, " %u", atomic_read(&tsk->dupl_rcvcnt));
3979 i += scnprintf(buf + i, sz - i, " %u", sk->sk_shutdown);
3980 i += scnprintf(buf + i, sz - i, " | %d", sk_wmem_alloc_get(sk));
3981 i += scnprintf(buf + i, sz - i, " %d", sk->sk_sndbuf);
3982 i += scnprintf(buf + i, sz - i, " | %d", sk_rmem_alloc_get(sk));
3983 i += scnprintf(buf + i, sz - i, " %d", sk->sk_rcvbuf);
Eric Dumazet70c26552019-10-09 15:41:03 -07003984 i += scnprintf(buf + i, sz - i, " | %d\n", READ_ONCE(sk->sk_backlog.len));
Tuong Lienb4b97712018-12-19 09:17:56 +07003985
3986 if (dqueues & TIPC_DUMP_SK_SNDQ) {
3987 i += scnprintf(buf + i, sz - i, "sk_write_queue: ");
3988 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i);
3989 }
3990
3991 if (dqueues & TIPC_DUMP_SK_RCVQ) {
3992 i += scnprintf(buf + i, sz - i, "sk_receive_queue: ");
3993 i += tipc_list_dump(&sk->sk_receive_queue, false, buf + i);
3994 }
3995
3996 if (dqueues & TIPC_DUMP_SK_BKLGQ) {
3997 i += scnprintf(buf + i, sz - i, "sk_backlog:\n head ");
3998 i += tipc_skb_dump(sk->sk_backlog.head, false, buf + i);
3999 if (sk->sk_backlog.tail != sk->sk_backlog.head) {
4000 i += scnprintf(buf + i, sz - i, " tail ");
4001 i += tipc_skb_dump(sk->sk_backlog.tail, false,
4002 buf + i);
4003 }
4004 }
4005
4006 return i;
4007}
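
/*
 * Editor's sketch (not part of the kernel source): how a caller inside the
 * TIPC tracing code might use tipc_sk_dump(). SK_LMAX and the TIPC_DUMP_*
 * flags come from net/tipc/trace.h; allocating the buffer with GFP_ATOMIC
 * and printing it with pr_info() are illustrative assumptions only.
 */
static void tipc_sk_dump_example(struct sock *sk)
{
	char *buf = kzalloc(SK_LMAX, GFP_ATOMIC);

	if (!buf)
		return;

	/* socket data plus the receive and backlog queues */
	tipc_sk_dump(sk, TIPC_DUMP_SK_RCVQ | TIPC_DUMP_SK_BKLGQ, buf);
	pr_info("%s", buf);
	kfree(buf);
}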