/*
 * Copyright (c) 2006 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
static unsigned int rds_tcp_tc_count;

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);

static struct kmem_cache *rds_tcp_conn_slab;

static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *fpos);

static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;

static struct ctl_table rds_tcp_sysctl_table[] = {
#define RDS_TCP_SNDBUF	0
	{
		.procname	= "rds_tcp_sndbuf",
		/* data is per-net pointer */
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= rds_tcp_skbuf_handler,
		.extra1		= &rds_tcp_min_sndbuf,
	},
#define RDS_TCP_RCVBUF	1
	{
		.procname	= "rds_tcp_rcvbuf",
		/* data is per-net pointer */
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= rds_tcp_skbuf_handler,
		.extra1		= &rds_tcp_min_rcvbuf,
	},
	{ }
};

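/* The two entries above are registered under "net/rds/tcp" by
 * rds_tcp_init_net(), so every network namespace gets its own
 * /proc/sys/net/rds/tcp/rds_tcp_sndbuf and rds_tcp_rcvbuf.  For example
 * (the value here is purely illustrative):
 *
 *	echo 1048576 > /proc/sys/net/rds/tcp/rds_tcp_sndbuf
 *
 * rds_tcp_skbuf_handler() rejects values below the SOCK_MIN_* floors via
 * proc_dointvec_minmax() and, on a successful write, calls
 * rds_tcp_sysctl_reset() so existing connections reconnect with the new
 * buffer size.
 */
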
/* doing it this way avoids calling tcp_sk() */
void rds_tcp_nonagle(struct socket *sock)
{
	int val = 1;

	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (void *)&val,
			  sizeof(val));
}

u32 rds_tcp_write_seq(struct rds_tcp_connection *tc)
{
	/* seq# of the last byte of data in tcp send buffer */
	return tcp_sk(tc->t_sock->sk)->write_seq;
}

u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
	return tcp_sk(tc->t_sock->sk)->snd_una;
}
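
/* The two helpers above expose the TCP socket's sequence state: write_seq is
 * the sequence number just past the last byte queued in the send buffer, and
 * snd_una is the first byte the peer has not yet acknowledged.  RDS-TCP uses
 * this pair to map TCP-level acknowledgment back onto whole RDS messages;
 * the corresponding t_last_sent_nxt/t_last_seen_una snapshots are exported
 * by rds_tcp_tc_info() below.
 */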

void rds_tcp_restore_callbacks(struct socket *sock,
			       struct rds_tcp_connection *tc)
{
	rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_del_init(&tc->t_list_item);
	rds_tcp_tc_count--;
	spin_unlock(&rds_tcp_tc_list_lock);

	tc->t_sock = NULL;

	sock->sk->sk_write_space = tc->t_orig_write_space;
	sock->sk->sk_data_ready = tc->t_orig_data_ready;
	sock->sk->sk_state_change = tc->t_orig_state_change;
	sock->sk->sk_user_data = NULL;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}

/*
 * rds_tcp_reset_callbacks() switches the connection path to the new sock
 * and returns the existing tc->t_sock.
 *
 * The only functions that set tc->t_sock are rds_tcp_set_callbacks
 * and rds_tcp_reset_callbacks.  Send and receive trust that
 * it is set.  The absence of RDS_CONN_UP bit protects those paths
 * from being called while it isn't set.
 */
void rds_tcp_reset_callbacks(struct socket *sock,
			     struct rds_conn_path *cp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;
	struct socket *osock = tc->t_sock;

	if (!osock)
		goto newsock;

	/* Need to resolve a duelling SYN between peers.
	 * We have an outstanding SYN to this peer, which may
	 * potentially have transitioned to the RDS_CONN_UP state,
	 * so we must quiesce any send threads before resetting
	 * cp_transport_data. We quiesce these threads by setting
	 * cp_state to something other than RDS_CONN_UP, and then
	 * waiting for any existing threads in rds_send_xmit to
	 * complete release_in_xmit(). (Subsequent threads entering
	 * rds_send_xmit() will bail on !rds_conn_up().)
	 *
	 * However an incoming syn-ack at this point would end up
	 * marking the conn as RDS_CONN_UP, and would again permit
	 * rds_send_xmit() threads through, so ideally we would
	 * synchronize on RDS_CONN_UP after lock_sock(), but cannot
	 * do that: waiting on !RDS_IN_XMIT after lock_sock() may
	 * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
	 * would not get set. As a result, we set c_state to
	 * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
	 * cannot mark rds_conn_path_up() in the window before lock_sock().
	 */
	atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
	wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
	lock_sock(osock->sk);
	/* reset receive side state for rds_tcp_data_recv() for osock */
	cancel_delayed_work_sync(&cp->cp_send_w);
	cancel_delayed_work_sync(&cp->cp_recv_w);
	if (tc->t_tinc) {
		rds_inc_put(&tc->t_tinc->ti_inc);
		tc->t_tinc = NULL;
	}
	tc->t_tinc_hdr_rem = sizeof(struct rds_header);
	tc->t_tinc_data_rem = 0;
	rds_tcp_restore_callbacks(osock, tc);
	release_sock(osock->sk);
	sock_release(osock);
newsock:
	rds_send_path_reset(cp);
	lock_sock(sock->sk);
	rds_tcp_set_callbacks(sock, cp);
	release_sock(sock->sk);
}

/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
 * above rds_tcp_reset_callbacks for notes about synchronization
 * with data path
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
{
	struct rds_tcp_connection *tc = cp->cp_transport_data;

	rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
	write_lock_bh(&sock->sk->sk_callback_lock);

	/* done under the callback_lock to serialize with write_space */
	spin_lock(&rds_tcp_tc_list_lock);
	list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
	rds_tcp_tc_count++;
	spin_unlock(&rds_tcp_tc_list_lock);

	/* accepted sockets need our listen data ready undone */
	if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
		sock->sk->sk_data_ready = sock->sk->sk_user_data;

	tc->t_sock = sock;
	tc->t_cpath = cp;
	tc->t_orig_data_ready = sock->sk->sk_data_ready;
	tc->t_orig_write_space = sock->sk->sk_write_space;
	tc->t_orig_state_change = sock->sk->sk_state_change;

	sock->sk->sk_user_data = cp;
	sock->sk->sk_data_ready = rds_tcp_data_ready;
	sock->sk->sk_write_space = rds_tcp_write_space;
	sock->sk->sk_state_change = rds_tcp_state_change;

	write_unlock_bh(&sock->sk->sk_callback_lock);
}
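
/* After rds_tcp_set_callbacks(), sk_user_data points at the rds_conn_path,
 * so the rds_tcp_data_ready/rds_tcp_write_space/rds_tcp_state_change
 * callbacks installed above can recover the connection path directly from
 * the socket.  The socket's original callbacks are stashed in tc->t_orig_*
 * so that rds_tcp_restore_callbacks() can undo the swap at teardown.
 */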

static void rds_tcp_tc_info(struct socket *rds_sock, unsigned int len,
			    struct rds_info_iterator *iter,
			    struct rds_info_lengths *lens)
{
	struct rds_info_tcp_socket tsinfo;
	struct rds_tcp_connection *tc;
	unsigned long flags;
	struct sockaddr_in sin;
	int sinlen;
	struct socket *sock;

	spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

	if (len / sizeof(tsinfo) < rds_tcp_tc_count)
		goto out;

	list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {

		sock = tc->t_sock;
		if (sock) {
			sock->ops->getname(sock, (struct sockaddr *)&sin,
					   &sinlen, 0);
			tsinfo.local_addr = sin.sin_addr.s_addr;
			tsinfo.local_port = sin.sin_port;
			sock->ops->getname(sock, (struct sockaddr *)&sin,
					   &sinlen, 1);
			tsinfo.peer_addr = sin.sin_addr.s_addr;
			tsinfo.peer_port = sin.sin_port;
		}

		tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
		tsinfo.data_rem = tc->t_tinc_data_rem;
		tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
		tsinfo.last_expected_una = tc->t_last_expected_una;
		tsinfo.last_seen_una = tc->t_last_seen_una;

		rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
	}

out:
	lens->nr = rds_tcp_tc_count;
	lens->each = sizeof(tsinfo);

	spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}
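
/* rds_tcp_tc_info() is the handler registered for RDS_INFO_TCP_SOCKETS in
 * rds_tcp_init(); userspace (e.g. the rds-info utility) requests that info
 * type on an RDS socket to dump one rds_info_tcp_socket record per tracked
 * TCP connection.
 */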

static int rds_tcp_laddr_check(struct net *net, __be32 addr)
{
	if (inet_addr_type(net, addr) == RTN_LOCAL)
		return 0;
	return -EADDRNOTAVAIL;
}

static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
	struct rds_tcp_connection *tc;
	int i;

	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
		tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
		if (!tc)
			return -ENOMEM;

		mutex_init(&tc->t_conn_path_lock);
		tc->t_sock = NULL;
		tc->t_tinc = NULL;
		tc->t_tinc_hdr_rem = sizeof(struct rds_header);
		tc->t_tinc_data_rem = 0;

		conn->c_path[i].cp_transport_data = tc;
		tc->t_cpath = &conn->c_path[i];

		spin_lock_irq(&rds_tcp_conn_lock);
		list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
		spin_unlock_irq(&rds_tcp_conn_lock);
		rdsdebug("rds_conn_path [%d] tc %p\n", i,
			 conn->c_path[i].cp_transport_data);
	}

	return 0;
}
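
/* Because the transport is marked t_mp_capable, rds_tcp_conn_alloc() backs
 * each of the connection's RDS_MPATH_WORKERS paths with its own
 * rds_tcp_connection (and therefore its own TCP socket).  Every tc is also
 * threaded onto rds_tcp_conn_list so that teardown paths such as
 * rds_tcp_destroy_conns() and rds_tcp_kill_sock() can find it.
 */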

static void rds_tcp_conn_free(void *arg)
{
	struct rds_tcp_connection *tc = arg;
	unsigned long flags;

	rdsdebug("freeing tc %p\n", tc);

	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
	list_del(&tc->t_tcp_node);
	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

	kmem_cache_free(rds_tcp_conn_slab, tc);
}

static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
{
	struct rds_tcp_connection *tc, *_tc;

	list_for_each_entry_safe(tc, _tc, list, t_tcp_node) {
		if (tc->t_cpath->cp_conn == conn)
			return true;
	}
	return false;
}

static void rds_tcp_destroy_conns(void)
{
	struct rds_tcp_connection *tc, *_tc;
	LIST_HEAD(tmp_list);

	/* avoid calling conn_destroy with irqs off */
	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
			list_move_tail(&tc->t_tcp_node, &tmp_list);
	}
	spin_unlock_irq(&rds_tcp_conn_lock);

	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
		rds_conn_destroy(tc->t_cpath->cp_conn);
}

static void rds_tcp_exit(void);

struct rds_transport rds_tcp_transport = {
	.laddr_check		= rds_tcp_laddr_check,
	.xmit_path_prepare	= rds_tcp_xmit_path_prepare,
	.xmit_path_complete	= rds_tcp_xmit_path_complete,
	.xmit			= rds_tcp_xmit,
	.recv_path		= rds_tcp_recv_path,
	.conn_alloc		= rds_tcp_conn_alloc,
	.conn_free		= rds_tcp_conn_free,
	.conn_path_connect	= rds_tcp_conn_path_connect,
	.conn_path_shutdown	= rds_tcp_conn_path_shutdown,
	.inc_copy_to_user	= rds_tcp_inc_copy_to_user,
	.inc_free		= rds_tcp_inc_free,
	.stats_info_copy	= rds_tcp_stats_info_copy,
	.exit			= rds_tcp_exit,
	.t_owner		= THIS_MODULE,
	.t_name			= "tcp",
	.t_type			= RDS_TRANS_TCP,
	.t_prefer_loopback	= 1,
	.t_mp_capable		= 1,
};
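
/* t_mp_capable = 1 advertises multipath support to the RDS core: a
 * connection to a peer is spread over up to RDS_MPATH_WORKERS
 * rds_conn_paths, and the conn_path_*, xmit_path_* and recv_path operations
 * above are invoked per path rather than once per connection.
 */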

static unsigned int rds_tcp_netid;

/* per-network namespace private data for this module */
struct rds_tcp_net {
	struct socket *rds_tcp_listen_sock;
	struct work_struct rds_tcp_accept_w;
	struct ctl_table_header *rds_tcp_sysctl;
	struct ctl_table *ctl_table;
	int sndbuf_size;
	int rcvbuf_size;
};
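
/* One rds_tcp_net exists per network namespace; the pernet infrastructure
 * allocates it (rds_tcp_net_ops.size below) and it is looked up with
 * net_generic(net, rds_tcp_netid) wherever per-netns state (listen socket,
 * sysctl table, buffer sizes) is needed.
 */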

/* All module specific customizations to the RDS-TCP socket should be done in
 * rds_tcp_tune() and applied after socket creation.
 */
void rds_tcp_tune(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct net *net = sock_net(sk);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	rds_tcp_nonagle(sock);
	lock_sock(sk);
	if (rtn->sndbuf_size > 0) {
		sk->sk_sndbuf = rtn->sndbuf_size;
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	}
	if (rtn->rcvbuf_size > 0) {
		sk->sk_rcvbuf = rtn->rcvbuf_size;
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
	release_sock(sk);
}
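
/* Setting SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK above pins the buffer sizes so
 * TCP autotuning does not later override the values configured through the
 * rds_tcp_sndbuf/rds_tcp_rcvbuf sysctls; leaving the sysctls at their
 * default of 0 skips both branches and keeps the stack's defaults and
 * autotuning behaviour.
 */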

static void rds_tcp_accept_worker(struct work_struct *work)
{
	struct rds_tcp_net *rtn = container_of(work,
					       struct rds_tcp_net,
					       rds_tcp_accept_w);

	while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
		cond_resched();
}

void rds_tcp_accept_work(struct sock *sk)
{
	struct net *net = sock_net(sk);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}

static __net_init int rds_tcp_init_net(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
	struct ctl_table *tbl;
	int err = 0;

	memset(rtn, 0, sizeof(*rtn));

	/* {snd, rcv}buf_size default to 0, which implies we let the
	 * stack pick the value, and permit auto-tuning of buffer size.
	 */
	if (net == &init_net) {
		tbl = rds_tcp_sysctl_table;
	} else {
		tbl = kmemdup(rds_tcp_sysctl_table,
			      sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
		if (!tbl) {
			pr_warn("could not allocate sysctl table\n");
			return -ENOMEM;
		}
		rtn->ctl_table = tbl;
	}
	tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
	tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
	rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl);
	if (!rtn->rds_tcp_sysctl) {
		pr_warn("could not register sysctl\n");
		err = -ENOMEM;
		goto fail;
	}
	rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
	if (!rtn->rds_tcp_listen_sock) {
		pr_warn("could not set up listen sock\n");
		unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
		rtn->rds_tcp_sysctl = NULL;
		err = -EAFNOSUPPORT;
		goto fail;
	}
	INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
	return 0;

fail:
	if (net != &init_net)
		kfree(tbl);
	return err;
}

static void __net_exit rds_tcp_exit_net(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

	if (rtn->rds_tcp_sysctl)
		unregister_net_sysctl_table(rtn->rds_tcp_sysctl);

	if (net != &init_net && rtn->ctl_table)
		kfree(rtn->ctl_table);

	/* If rds_tcp_exit_net() is called as a result of netns deletion,
	 * the rds_tcp_kill_sock() device notifier would already have cleaned
	 * up the listen socket, thus there is no work to do in this function.
	 *
	 * If rds_tcp_exit_net() is called as a result of module unload,
	 * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
	 * we do need to clean up the listen socket here.
	 */
	if (rtn->rds_tcp_listen_sock) {
		struct socket *lsock = rtn->rds_tcp_listen_sock;

		rtn->rds_tcp_listen_sock = NULL;
		rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
	}
}

static struct pernet_operations rds_tcp_net_ops = {
	.init = rds_tcp_init_net,
	.exit = rds_tcp_exit_net,
	.id = &rds_tcp_netid,
	.size = sizeof(struct rds_tcp_net),
};

/* explicitly send a RST on each socket, thereby releasing any socket refcnts
 * that may otherwise hold up netns deletion.
 */
static void rds_tcp_conn_paths_destroy(struct rds_connection *conn)
{
	struct rds_conn_path *cp;
	struct rds_tcp_connection *tc;
	int i;
	struct sock *sk;

	for (i = 0; i < RDS_MPATH_WORKERS; i++) {
		cp = &conn->c_path[i];
		tc = cp->cp_transport_data;
		if (!tc->t_sock)
			continue;
		sk = tc->t_sock->sk;
		sk->sk_prot->disconnect(sk, 0);
		tcp_done(sk);
	}
}

static void rds_tcp_kill_sock(struct net *net)
{
	struct rds_tcp_connection *tc, *_tc;
	LIST_HEAD(tmp_list);
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
	struct socket *lsock = rtn->rds_tcp_listen_sock;

	rtn->rds_tcp_listen_sock = NULL;
	rds_tcp_listen_stop(lsock, &rtn->rds_tcp_accept_w);
	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		struct net *c_net = tc->t_cpath->cp_conn->c_net;

		if (net != c_net || !tc->t_sock)
			continue;
		if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
			list_move_tail(&tc->t_tcp_node, &tmp_list);
	}
	spin_unlock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
		rds_tcp_conn_paths_destroy(tc->t_cpath->cp_conn);
		rds_conn_destroy(tc->t_cpath->cp_conn);
	}
}

void *rds_tcp_listen_sock_def_readable(struct net *net)
{
	struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
	struct socket *lsock = rtn->rds_tcp_listen_sock;

	if (!lsock)
		return NULL;

	return lsock->sk->sk_user_data;
}

static int rds_tcp_dev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* rds-tcp registers as a pernet subsys, so the ->exit will only
	 * get invoked after network activity has quiesced. We need to
	 * clean up all sockets to quiesce network activity, and use
	 * the unregistration of the per-net loopback device as a trigger
	 * to start that cleanup.
	 */
	if (event == NETDEV_UNREGISTER_FINAL &&
	    dev->ifindex == LOOPBACK_IFINDEX)
		rds_tcp_kill_sock(dev_net(dev));

	return NOTIFY_DONE;
}

static struct notifier_block rds_tcp_dev_notifier = {
	.notifier_call = rds_tcp_dev_event,
	.priority = -10, /* must be called after other network notifiers */
};

/* When sysctl is used to modify some kernel socket parameters, this
 * function resets the RDS connections in that netns so that we can
 * restart with new parameters. The assumption is that such reset
 * events are few and far between.
 */
static void rds_tcp_sysctl_reset(struct net *net)
{
	struct rds_tcp_connection *tc, *_tc;

	spin_lock_irq(&rds_tcp_conn_lock);
	list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
		struct net *c_net = tc->t_cpath->cp_conn->c_net;

		if (net != c_net || !tc->t_sock)
			continue;

		/* reconnect with new parameters */
		rds_conn_path_drop(tc->t_cpath, false);
	}
	spin_unlock_irq(&rds_tcp_conn_lock);
}

static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
				 void __user *buffer, size_t *lenp,
				 loff_t *fpos)
{
	struct net *net = current->nsproxy->net_ns;
	int err;

	err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
	if (err < 0) {
		pr_warn("Invalid input. Must be >= %d\n",
			*(int *)(ctl->extra1));
		return err;
	}
	if (write)
		rds_tcp_sysctl_reset(net);
	return 0;
}

static void rds_tcp_exit(void)
{
	rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
	unregister_pernet_subsys(&rds_tcp_net_ops);
	if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
		pr_warn("could not unregister rds_tcp_dev_notifier\n");
	rds_tcp_destroy_conns();
	rds_trans_unregister(&rds_tcp_transport);
	rds_tcp_recv_exit();
	kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

static int rds_tcp_init(void)
{
	int ret;

	rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
					      sizeof(struct rds_tcp_connection),
					      0, 0, NULL);
	if (!rds_tcp_conn_slab) {
		ret = -ENOMEM;
		goto out;
	}

	ret = rds_tcp_recv_init();
	if (ret)
		goto out_slab;

	ret = register_pernet_subsys(&rds_tcp_net_ops);
	if (ret)
		goto out_recv;

	ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
	if (ret) {
		pr_warn("could not register rds_tcp_dev_notifier\n");
		goto out_pernet;
	}

	rds_trans_register(&rds_tcp_transport);

	rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);

	goto out;

out_pernet:
	unregister_pernet_subsys(&rds_tcp_net_ops);
out_recv:
	rds_tcp_recv_exit();
out_slab:
	kmem_cache_destroy(rds_tcp_conn_slab);
out:
	return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");