/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/module.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#include "rds_single_path.h"
#include "rds.h"
#include "tcp.h"

/* only for info exporting */
static DEFINE_SPINLOCK(rds_tcp_tc_list_lock);
static LIST_HEAD(rds_tcp_tc_list);
static unsigned int rds_tcp_tc_count;

/* Track rds_tcp_connection structs so they can be cleaned up */
static DEFINE_SPINLOCK(rds_tcp_conn_lock);
static LIST_HEAD(rds_tcp_conn_list);

static struct kmem_cache *rds_tcp_conn_slab;

static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *fpos);

static int rds_tcp_min_sndbuf = SOCK_MIN_SNDBUF;
static int rds_tcp_min_rcvbuf = SOCK_MIN_RCVBUF;

static struct ctl_table rds_tcp_sysctl_table[] = {
#define RDS_TCP_SNDBUF  0
        {
                .procname       = "rds_tcp_sndbuf",
                /* data is per-net pointer */
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rds_tcp_skbuf_handler,
                .extra1         = &rds_tcp_min_sndbuf,
        },
#define RDS_TCP_RCVBUF  1
        {
                .procname       = "rds_tcp_rcvbuf",
                /* data is per-net pointer */
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = rds_tcp_skbuf_handler,
                .extra1         = &rds_tcp_min_rcvbuf,
        },
        { }
};

/* doing it this way avoids calling tcp_sk() */
void rds_tcp_nonagle(struct socket *sock)
{
        mm_segment_t oldfs = get_fs();
        int val = 1;

        set_fs(KERNEL_DS);
        sock->ops->setsockopt(sock, SOL_TCP, TCP_NODELAY, (char __user *)&val,
                              sizeof(val));
        set_fs(oldfs);
}

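/* Helpers that read the TCP send-side sequence numbers off the underlying
 * socket; tc->t_sock must already be set when these are called.
 */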
u32 rds_tcp_snd_nxt(struct rds_tcp_connection *tc)
{
        return tcp_sk(tc->t_sock->sk)->snd_nxt;
}

u32 rds_tcp_snd_una(struct rds_tcp_connection *tc)
{
        return tcp_sk(tc->t_sock->sk)->snd_una;
}

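/* Undo rds_tcp_set_callbacks(): drop tc from rds_tcp_tc_list, clear
 * tc->t_sock and restore the socket's original sk callbacks.
 */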
void rds_tcp_restore_callbacks(struct socket *sock,
                               struct rds_tcp_connection *tc)
{
        rdsdebug("restoring sock %p callbacks from tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_del_init(&tc->t_list_item);
        rds_tcp_tc_count--;
        spin_unlock(&rds_tcp_tc_list_lock);

        tc->t_sock = NULL;

        sock->sk->sk_write_space = tc->t_orig_write_space;
        sock->sk->sk_data_ready = tc->t_orig_data_ready;
        sock->sk->sk_state_change = tc->t_orig_state_change;
        sock->sk->sk_user_data = NULL;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}

/*
 * rds_tcp_reset_callbacks() switches to the new sock and
 * returns the existing tc->t_sock.
 *
 * The only functions that set tc->t_sock are rds_tcp_set_callbacks
 * and rds_tcp_reset_callbacks.  Send and receive trust that
 * it is set.  The absence of RDS_CONN_UP bit protects those paths
 * from being called while it isn't set.
 */
void rds_tcp_reset_callbacks(struct socket *sock,
                             struct rds_conn_path *cp)
{
        struct rds_tcp_connection *tc = cp->cp_transport_data;
        struct socket *osock = tc->t_sock;

        if (!osock)
                goto newsock;

        /* Need to resolve a duelling SYN between peers.
         * We have an outstanding SYN to this peer, which may
         * potentially have transitioned to the RDS_CONN_UP state,
         * so we must quiesce any send threads before resetting
         * cp_transport_data. We quiesce these threads by setting
         * cp_state to something other than RDS_CONN_UP, and then
         * waiting for any existing threads in rds_send_xmit to
         * complete release_in_xmit(). (Subsequent threads entering
         * rds_send_xmit() will bail on !rds_conn_up().)
         *
         * However an incoming syn-ack at this point would end up
         * marking the conn as RDS_CONN_UP, and would again permit
         * rds_send_xmit() threads through, so ideally we would
         * synchronize on RDS_CONN_UP after lock_sock(), but cannot
         * do that: waiting on !RDS_IN_XMIT after lock_sock() may
         * end up deadlocking with tcp_sendmsg(), and the RDS_IN_XMIT
         * would not get set. As a result, we set c_state to
         * RDS_CONN_RESETTING, to ensure that rds_tcp_state_change
         * cannot mark rds_conn_path_up() in the window before lock_sock().
         */
        atomic_set(&cp->cp_state, RDS_CONN_RESETTING);
        wait_event(cp->cp_waitq, !test_bit(RDS_IN_XMIT, &cp->cp_flags));
        lock_sock(osock->sk);
        /* reset receive side state for rds_tcp_data_recv() for osock */
        if (tc->t_tinc) {
                rds_inc_put(&tc->t_tinc->ti_inc);
                tc->t_tinc = NULL;
        }
        tc->t_tinc_hdr_rem = sizeof(struct rds_header);
        tc->t_tinc_data_rem = 0;
        tc->t_sock = NULL;

        write_lock_bh(&osock->sk->sk_callback_lock);

        osock->sk->sk_user_data = NULL;
        osock->sk->sk_data_ready = tc->t_orig_data_ready;
        osock->sk->sk_write_space = tc->t_orig_write_space;
        osock->sk->sk_state_change = tc->t_orig_state_change;
        write_unlock_bh(&osock->sk->sk_callback_lock);
        release_sock(osock->sk);
        sock_release(osock);
newsock:
        rds_send_path_reset(cp);
        lock_sock(sock->sk);
        write_lock_bh(&sock->sk->sk_callback_lock);
        tc->t_sock = sock;
        tc->t_cpath = cp;
        sock->sk->sk_user_data = cp;
        sock->sk->sk_data_ready = rds_tcp_data_ready;
        sock->sk->sk_write_space = rds_tcp_write_space;
        sock->sk->sk_state_change = rds_tcp_state_change;

        write_unlock_bh(&sock->sk->sk_callback_lock);
        release_sock(sock->sk);
}

/* Add tc to rds_tcp_tc_list and set tc->t_sock. See comments
 * above rds_tcp_reset_callbacks for notes about synchronization
 * with data path
 */
void rds_tcp_set_callbacks(struct socket *sock, struct rds_conn_path *cp)
{
        struct rds_tcp_connection *tc = cp->cp_transport_data;

        rdsdebug("setting sock %p callbacks to tc %p\n", sock, tc);
        write_lock_bh(&sock->sk->sk_callback_lock);

        /* done under the callback_lock to serialize with write_space */
        spin_lock(&rds_tcp_tc_list_lock);
        list_add_tail(&tc->t_list_item, &rds_tcp_tc_list);
        rds_tcp_tc_count++;
        spin_unlock(&rds_tcp_tc_list_lock);

        /* accepted sockets need our listen data ready undone */
        if (sock->sk->sk_data_ready == rds_tcp_listen_data_ready)
                sock->sk->sk_data_ready = sock->sk->sk_user_data;

        tc->t_sock = sock;
        tc->t_cpath = cp;
        tc->t_orig_data_ready = sock->sk->sk_data_ready;
        tc->t_orig_write_space = sock->sk->sk_write_space;
        tc->t_orig_state_change = sock->sk->sk_state_change;

        sock->sk->sk_user_data = cp;
        sock->sk->sk_data_ready = rds_tcp_data_ready;
        sock->sk->sk_write_space = rds_tcp_write_space;
        sock->sk->sk_state_change = rds_tcp_state_change;

        write_unlock_bh(&sock->sk->sk_callback_lock);
}

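/* Fill in one rds_info_tcp_socket entry per tracked connection for the
 * RDS_INFO_TCP_SOCKETS info request.
 */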
static void rds_tcp_tc_info(struct socket *sock, unsigned int len,
                            struct rds_info_iterator *iter,
                            struct rds_info_lengths *lens)
{
        struct rds_info_tcp_socket tsinfo;
        struct rds_tcp_connection *tc;
        unsigned long flags;
        struct sockaddr_in sin;
        int sinlen;

        spin_lock_irqsave(&rds_tcp_tc_list_lock, flags);

        if (len / sizeof(tsinfo) < rds_tcp_tc_count)
                goto out;

        list_for_each_entry(tc, &rds_tcp_tc_list, t_list_item) {

                sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 0);
                tsinfo.local_addr = sin.sin_addr.s_addr;
                tsinfo.local_port = sin.sin_port;
                sock->ops->getname(sock, (struct sockaddr *)&sin, &sinlen, 1);
                tsinfo.peer_addr = sin.sin_addr.s_addr;
                tsinfo.peer_port = sin.sin_port;

                tsinfo.hdr_rem = tc->t_tinc_hdr_rem;
                tsinfo.data_rem = tc->t_tinc_data_rem;
                tsinfo.last_sent_nxt = tc->t_last_sent_nxt;
                tsinfo.last_expected_una = tc->t_last_expected_una;
                tsinfo.last_seen_una = tc->t_last_seen_una;

                rds_info_copy(iter, &tsinfo, sizeof(tsinfo));
        }

out:
        lens->nr = rds_tcp_tc_count;
        lens->each = sizeof(tsinfo);

        spin_unlock_irqrestore(&rds_tcp_tc_list_lock, flags);
}

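/* The TCP transport can only use a local address that actually belongs to
 * the given network namespace.
 */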
static int rds_tcp_laddr_check(struct net *net, __be32 addr)
{
        if (inet_addr_type(net, addr) == RTN_LOCAL)
                return 0;
        return -EADDRNOTAVAIL;
}

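/* Allocate one rds_tcp_connection per connection path and link each onto
 * rds_tcp_conn_list so it can be found again at netns/module teardown.
 */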
static int rds_tcp_conn_alloc(struct rds_connection *conn, gfp_t gfp)
{
        struct rds_tcp_connection *tc;
        int i;

        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                tc = kmem_cache_alloc(rds_tcp_conn_slab, gfp);
                if (!tc)
                        return -ENOMEM;

                mutex_init(&tc->t_conn_path_lock);
                tc->t_sock = NULL;
                tc->t_tinc = NULL;
                tc->t_tinc_hdr_rem = sizeof(struct rds_header);
                tc->t_tinc_data_rem = 0;

                conn->c_path[i].cp_transport_data = tc;
                tc->t_cpath = &conn->c_path[i];

                spin_lock_irq(&rds_tcp_conn_lock);
                list_add_tail(&tc->t_tcp_node, &rds_tcp_conn_list);
                spin_unlock_irq(&rds_tcp_conn_lock);
                rdsdebug("rds_conn_path [%d] tc %p\n", i,
                         conn->c_path[i].cp_transport_data);
        }

        return 0;
}

static void rds_tcp_conn_free(void *arg)
{
        struct rds_tcp_connection *tc = arg;
        unsigned long flags;

        rdsdebug("freeing tc %p\n", tc);

        spin_lock_irqsave(&rds_tcp_conn_lock, flags);
        list_del(&tc->t_tcp_node);
        spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);

        kmem_cache_free(rds_tcp_conn_slab, tc);
}

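/* Returns true if any path of @conn is already queued on @list; used to
 * avoid moving the same connection onto a teardown list more than once.
 */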
static bool list_has_conn(struct list_head *list, struct rds_connection *conn)
{
        struct rds_tcp_connection *tc, *_tc;

        list_for_each_entry_safe(tc, _tc, list, t_tcp_node) {
                if (tc->t_cpath->cp_conn == conn)
                        return true;
        }
        return false;
}

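/* Tear down every connection still on rds_tcp_conn_list; called from
 * rds_tcp_exit() at module unload.
 */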
static void rds_tcp_destroy_conns(void)
{
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);

        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node)
                rds_conn_destroy(tc->t_cpath->cp_conn);
}

static void rds_tcp_exit(void);

struct rds_transport rds_tcp_transport = {
        .laddr_check            = rds_tcp_laddr_check,
        .xmit_path_prepare      = rds_tcp_xmit_path_prepare,
        .xmit_path_complete     = rds_tcp_xmit_path_complete,
        .xmit                   = rds_tcp_xmit,
        .recv                   = rds_tcp_recv,
        .conn_alloc             = rds_tcp_conn_alloc,
        .conn_free              = rds_tcp_conn_free,
        .conn_connect           = rds_tcp_conn_connect,
        .conn_path_shutdown     = rds_tcp_conn_path_shutdown,
        .inc_copy_to_user       = rds_tcp_inc_copy_to_user,
        .inc_free               = rds_tcp_inc_free,
        .stats_info_copy        = rds_tcp_stats_info_copy,
        .exit                   = rds_tcp_exit,
        .t_owner                = THIS_MODULE,
        .t_name                 = "tcp",
        .t_type                 = RDS_TRANS_TCP,
        .t_prefer_loopback      = 1,
};

static int rds_tcp_netid;

/* per-network namespace private data for this module */
struct rds_tcp_net {
        struct socket *rds_tcp_listen_sock;
        struct work_struct rds_tcp_accept_w;
        struct ctl_table_header *rds_tcp_sysctl;
        struct ctl_table *ctl_table;
        int sndbuf_size;
        int rcvbuf_size;
};

/* All module specific customizations to the RDS-TCP socket should be done in
 * rds_tcp_tune() and applied after socket creation.
 */
void rds_tcp_tune(struct socket *sock)
{
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rds_tcp_nonagle(sock);
        lock_sock(sk);
        if (rtn->sndbuf_size > 0) {
                sk->sk_sndbuf = rtn->sndbuf_size;
                sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
        }
        if (rtn->rcvbuf_size > 0) {
                sk->sk_rcvbuf = rtn->rcvbuf_size;
                sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
        }
        release_sock(sk);
}

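/* Worker that drains the pending-accept queue of the per-netns listen
 * socket, one connection per rds_tcp_accept_one() call.
 */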
static void rds_tcp_accept_worker(struct work_struct *work)
{
        struct rds_tcp_net *rtn = container_of(work,
                                               struct rds_tcp_net,
                                               rds_tcp_accept_w);

        while (rds_tcp_accept_one(rtn->rds_tcp_listen_sock) == 0)
                cond_resched();
}

void rds_tcp_accept_work(struct sock *sk)
{
        struct net *net = sock_net(sk);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        queue_work(rds_wq, &rtn->rds_tcp_accept_w);
}

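/* Per-netns setup: register the rds_tcp sysctl table (shared for init_net,
 * duplicated for other namespaces) and bring up this namespace's listen
 * socket.
 */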
static __net_init int rds_tcp_init_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);
        struct ctl_table *tbl;
        int err = 0;

        memset(rtn, 0, sizeof(*rtn));

        /* {snd, rcv}buf_size default to 0, which implies we let the
         * stack pick the value, and permit auto-tuning of buffer size.
         */
        if (net == &init_net) {
                tbl = rds_tcp_sysctl_table;
        } else {
                tbl = kmemdup(rds_tcp_sysctl_table,
                              sizeof(rds_tcp_sysctl_table), GFP_KERNEL);
                if (!tbl) {
                        pr_warn("could not allocate sysctl table\n");
                        return -ENOMEM;
                }
                rtn->ctl_table = tbl;
        }
        tbl[RDS_TCP_SNDBUF].data = &rtn->sndbuf_size;
        tbl[RDS_TCP_RCVBUF].data = &rtn->rcvbuf_size;
        rtn->rds_tcp_sysctl = register_net_sysctl(net, "net/rds/tcp", tbl);
        if (!rtn->rds_tcp_sysctl) {
                pr_warn("could not register sysctl\n");
                err = -ENOMEM;
                goto fail;
        }
        rtn->rds_tcp_listen_sock = rds_tcp_listen_init(net);
        if (!rtn->rds_tcp_listen_sock) {
                pr_warn("could not set up listen sock\n");
                unregister_net_sysctl_table(rtn->rds_tcp_sysctl);
                rtn->rds_tcp_sysctl = NULL;
                err = -EAFNOSUPPORT;
                goto fail;
        }
        INIT_WORK(&rtn->rds_tcp_accept_w, rds_tcp_accept_worker);
        return 0;

fail:
        if (net != &init_net)
                kfree(tbl);
        return err;
}

static void __net_exit rds_tcp_exit_net(struct net *net)
{
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        if (rtn->rds_tcp_sysctl)
                unregister_net_sysctl_table(rtn->rds_tcp_sysctl);

        if (net != &init_net && rtn->ctl_table)
                kfree(rtn->ctl_table);

        /* If rds_tcp_exit_net() is called as a result of netns deletion,
         * the rds_tcp_kill_sock() device notifier would already have cleaned
         * up the listen socket, thus there is no work to do in this function.
         *
         * If rds_tcp_exit_net() is called as a result of module unload,
         * i.e., due to rds_tcp_exit() -> unregister_pernet_subsys(), then
         * we do need to clean up the listen socket here.
         */
        if (rtn->rds_tcp_listen_sock) {
                rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
                rtn->rds_tcp_listen_sock = NULL;
                flush_work(&rtn->rds_tcp_accept_w);
        }
}

static struct pernet_operations rds_tcp_net_ops = {
        .init = rds_tcp_init_net,
        .exit = rds_tcp_exit_net,
        .id = &rds_tcp_netid,
        .size = sizeof(struct rds_tcp_net),
};

/* explicitly send a RST on each socket, thereby releasing any socket refcnts
 * that may otherwise hold up netns deletion.
 */
static void rds_tcp_conn_paths_destroy(struct rds_connection *conn)
{
        struct rds_conn_path *cp;
        struct rds_tcp_connection *tc;
        int i;
        struct sock *sk;

        for (i = 0; i < RDS_MPATH_WORKERS; i++) {
                cp = &conn->c_path[i];
                tc = cp->cp_transport_data;
                if (!tc->t_sock)
                        continue;
                sk = tc->t_sock->sk;
                sk->sk_prot->disconnect(sk, 0);
                tcp_done(sk);
        }
}

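/* Stop the per-netns listener and destroy every connection that belongs to
 * @net; called from the netdevice notifier when the namespace's loopback
 * device is unregistered.
 */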
static void rds_tcp_kill_sock(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;
        LIST_HEAD(tmp_list);
        struct rds_tcp_net *rtn = net_generic(net, rds_tcp_netid);

        rds_tcp_listen_stop(rtn->rds_tcp_listen_sock);
        rtn->rds_tcp_listen_sock = NULL;
        flush_work(&rtn->rds_tcp_accept_w);
        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

                if (net != c_net || !tc->t_sock)
                        continue;
                if (!list_has_conn(&tmp_list, tc->t_cpath->cp_conn))
                        list_move_tail(&tc->t_tcp_node, &tmp_list);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &tmp_list, t_tcp_node) {
                rds_tcp_conn_paths_destroy(tc->t_cpath->cp_conn);
                rds_conn_destroy(tc->t_cpath->cp_conn);
        }
}

static int rds_tcp_dev_event(struct notifier_block *this,
                             unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        /* rds-tcp registers as a pernet subsys, so the ->exit will only
         * get invoked after network activity has quiesced. We need to
         * clean up all sockets to quiesce network activity, and use
         * the unregistration of the per-net loopback device as a trigger
         * to start that cleanup.
         */
        if (event == NETDEV_UNREGISTER_FINAL &&
            dev->ifindex == LOOPBACK_IFINDEX)
                rds_tcp_kill_sock(dev_net(dev));

        return NOTIFY_DONE;
}

static struct notifier_block rds_tcp_dev_notifier = {
        .notifier_call = rds_tcp_dev_event,
        .priority = -10, /* must be called after other network notifiers */
};

/* when sysctl is used to modify some kernel socket parameters, this
 * function resets the RDS connections in that netns so that we can
 * restart with new parameters. The assumption is that such reset
 * events are few and far-between.
 */
static void rds_tcp_sysctl_reset(struct net *net)
{
        struct rds_tcp_connection *tc, *_tc;

        spin_lock_irq(&rds_tcp_conn_lock);
        list_for_each_entry_safe(tc, _tc, &rds_tcp_conn_list, t_tcp_node) {
                struct net *c_net = read_pnet(&tc->t_cpath->cp_conn->c_net);

                if (net != c_net || !tc->t_sock)
                        continue;

                /* reconnect with new parameters */
                rds_conn_path_drop(tc->t_cpath);
        }
        spin_unlock_irq(&rds_tcp_conn_lock);
}

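/* sysctl handler for rds_tcp_sndbuf/rds_tcp_rcvbuf: validate the new value
 * against the SOCK_MIN_* floor and, on a successful write, drop the
 * connections in the caller's netns so they reconnect with the new size.
 */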
static int rds_tcp_skbuf_handler(struct ctl_table *ctl, int write,
                                 void __user *buffer, size_t *lenp,
                                 loff_t *fpos)
{
        struct net *net = current->nsproxy->net_ns;
        int err;

        err = proc_dointvec_minmax(ctl, write, buffer, lenp, fpos);
        if (err < 0) {
                pr_warn("Invalid input. Must be >= %d\n",
                        *(int *)(ctl->extra1));
                return err;
        }
        if (write)
                rds_tcp_sysctl_reset(net);
        return 0;
}

static void rds_tcp_exit(void)
{
        rds_info_deregister_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);
        unregister_pernet_subsys(&rds_tcp_net_ops);
        if (unregister_netdevice_notifier(&rds_tcp_dev_notifier))
                pr_warn("could not unregister rds_tcp_dev_notifier\n");
        rds_tcp_destroy_conns();
        rds_trans_unregister(&rds_tcp_transport);
        rds_tcp_recv_exit();
        kmem_cache_destroy(rds_tcp_conn_slab);
}
module_exit(rds_tcp_exit);

static int rds_tcp_init(void)
{
        int ret;

        rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection",
                                              sizeof(struct rds_tcp_connection),
                                              0, 0, NULL);
        if (!rds_tcp_conn_slab) {
                ret = -ENOMEM;
                goto out;
        }

        /* Unwind in reverse order on failure so that nothing registered
         * before the failing step is left behind.
         */
        ret = register_netdevice_notifier(&rds_tcp_dev_notifier);
        if (ret) {
                pr_warn("could not register rds_tcp_dev_notifier\n");
                goto out_slab;
        }

        ret = register_pernet_subsys(&rds_tcp_net_ops);
        if (ret)
                goto out_notifier;

        ret = rds_tcp_recv_init();
        if (ret)
                goto out_pernet;

        ret = rds_trans_register(&rds_tcp_transport);
        if (ret)
                goto out_recv;

        rds_info_register_func(RDS_INFO_TCP_SOCKETS, rds_tcp_tc_info);

        goto out;

out_recv:
        rds_tcp_recv_exit();
out_pernet:
        unregister_pernet_subsys(&rds_tcp_net_ops);
out_notifier:
        unregister_netdevice_notifier(&rds_tcp_dev_notifier);
out_slab:
        kmem_cache_destroy(rds_tcp_conn_slab);
out:
        return ret;
}
module_init(rds_tcp_init);

MODULE_AUTHOR("Oracle Corporation <rds-devel@oss.oracle.com>");
MODULE_DESCRIPTION("RDS: TCP transport");
MODULE_LICENSE("Dual BSD/GPL");