// SPDX-License-Identifier: GPL-2.0-only
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 */
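
/*
 * For illustration only (an assumption about userspace, not something this
 * file enforces): the 'protocol' variable above is normally written by
 * dlm_controld through configfs before any lockspace is created, e.g.
 * something like:
 *
 *	echo 1 > /sys/kernel/config/dlm/cluster/protocol
 *
 * to select SCTP cluster-wide.
 */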

#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <linux/slab.h>
#include <net/sctp/sctp.h>
#include <net/ipv6.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)
#define CONN_HASH_SIZE 32

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25

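/*
 * struct cbuf describes a circular buffer over a single page of received
 * data. cbuf_init() must be given a power-of-two size so that 'mask' can
 * wrap indices with a cheap AND. For example, with size 4096, base 4000
 * and len 200, cbuf_data() returns (4000 + 200) & 4095 = 104, i.e. the
 * unread data wraps around to the front of the page.
 */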
struct cbuf {
	unsigned int base;
	unsigned int len;
	unsigned int mask;
};

static void cbuf_add(struct cbuf *cb, int n)
{
	cb->len += n;
}

static int cbuf_data(struct cbuf *cb)
{
	return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
	cb->base = cb->len = 0;
	cb->mask = size-1;
}

static void cbuf_eat(struct cbuf *cb, int n)
{
	cb->len -= n;
	cb->base += n;
	cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
	return cb->len == 0;
}

struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;
	unsigned long flags;
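	/*
	 * The CF_* values below are bit numbers (used on 'flags' with
	 * set_bit(), test_bit() and friends), not masks.
	 */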
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
#define CF_CLOSE 6
#define CF_APP_LIMITED 7
#define CF_CLOSING 8
	struct list_head writequeue;  /* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	int (*rx_action) (struct connection *);	/* What to do when active */
	void (*connect_action) (struct connection *);	/* What to do to connect */
	struct page *rx_page;
	struct cbuf cb;
	int retries;
#define MAX_CONNECT_RETRIES 3
	struct hlist_node list;
	struct connection *othercon;
	struct work_struct rwork; /* Receive workqueue */
	struct work_struct swork; /* Send workqueue */
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;
	struct page *page;
	int offset;
	int len;
	int end;
	int users;
	struct connection *con;
};

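/* Per-node address list. For SCTP, curr_addr_index lets a failed connect
   retry with the next address of a multi-homed peer (see nodeid_to_addr()
   with try_new_addr set). */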
struct dlm_node_addr {
	struct list_head list;
	int nodeid;
	int addr_count;
	int curr_addr_index;
	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};

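/* Saved callbacks of the listening socket. accept() clones the listening
   sk's callbacks into each new sk, so the originals are kept here to be
   restored when a socket is closed (see restore_callbacks()). */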
static struct listen_sock_callbacks {
	void (*sk_error_report)(struct sock *);
	void (*sk_data_ready)(struct sock *);
	void (*sk_state_change)(struct sock *);
	void (*sk_write_space)(struct sock *);
} listen_sock;

static LIST_HEAD(dlm_node_addrs);
static DEFINE_SPINLOCK(dlm_node_addrs_spin);

static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;
static int dlm_allow_conn;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_MUTEX(connections_lock);
static struct kmem_cache *con_cache;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);


/* This is deliberately very simple because most clusters have simple
   sequential nodeids, so we should be able to go straight to a connection
   struct in the array. CONN_HASH_SIZE must stay a power of two for the
   mask below to work. */
static inline int nodeid_hash(int nodeid)
{
	return nodeid & (CONN_HASH_SIZE-1);
}

static struct connection *__find_con(int nodeid)
{
	int r;
	struct connection *con;

	r = nodeid_hash(nodeid);

	hlist_for_each_entry(con, &connection_hash[r], list) {
		if (con->nodeid == nodeid)
			return con;
	}
	return NULL;
}

/*
 * If 'alloc' is zero then we don't attempt to create a new
 * connection structure for this node. Connection 0 is special: it is
 * the listening connection, and new per-node connections inherit its
 * action pointers below.
 */
static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con = NULL;
	int r;

	con = __find_con(nodeid);
	if (con || !alloc)
		return con;

	con = kmem_cache_zalloc(con_cache, alloc);
	if (!con)
		return NULL;

	r = nodeid_hash(nodeid);
	hlist_add_head(&con->list, &connection_hash[r]);

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);

	/* Setup action pointers for child sockets */
	if (con->nodeid) {
		struct connection *zerocon = __find_con(0);

		con->connect_action = zerocon->connect_action;
		if (!con->rx_action)
			con->rx_action = zerocon->rx_action;
	}

	return con;
}

/* Loop round all connections */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
	int i;
	struct hlist_node *n;
	struct connection *con;

	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
			conn_func(con);
	}
}

static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
	struct connection *con;

	mutex_lock(&connections_lock);
	con = __nodeid2con(nodeid, allocation);
	mutex_unlock(&connections_lock);

	return con;
}

static struct dlm_node_addr *find_node_addr(int nodeid)
{
	struct dlm_node_addr *na;

	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (na->nodeid == nodeid)
			return na;
	}
	return NULL;
}

static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
{
	switch (x->ss_family) {
	case AF_INET: {
		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
		struct sockaddr_in *siny = (struct sockaddr_in *)y;
		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
			return 0;
		if (sinx->sin_port != siny->sin_port)
			return 0;
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
			return 0;
		if (sinx->sin6_port != siny->sin6_port)
			return 0;
		break;
	}
	default:
		return 0;
	}
	return 1;
}

static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
			  struct sockaddr *sa_out, bool try_new_addr)
{
	struct sockaddr_storage sas;
	struct dlm_node_addr *na;

	if (!dlm_local_count)
		return -1;

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na && na->addr_count) {
		memcpy(&sas, na->addr[na->curr_addr_index],
		       sizeof(struct sockaddr_storage));

		if (try_new_addr) {
			na->curr_addr_index++;
			if (na->curr_addr_index == na->addr_count)
				na->curr_addr_index = 0;
		}
	}
	spin_unlock(&dlm_node_addrs_spin);

	if (!na)
		return -EEXIST;

	if (!na->addr_count)
		return -ENOENT;

	if (sas_out)
		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));

	if (!sa_out)
		return 0;

	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
		ret6->sin6_addr = in6->sin6_addr;
	}

	return 0;
}

static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
{
	struct dlm_node_addr *na;
	int rv = -EEXIST;
	int addr_i;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (!na->addr_count)
			continue;

		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
			if (addr_compare(na->addr[addr_i], addr)) {
				*nodeid = na->nodeid;
				rv = 0;
				goto unlock;
			}
		}
	}
unlock:
	spin_unlock(&dlm_node_addrs_spin);
	return rv;
}

int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
{
	struct sockaddr_storage *new_addr;
	struct dlm_node_addr *new_node, *na;

	new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
	if (!new_node)
		return -ENOMEM;

	new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
	if (!new_addr) {
		kfree(new_node);
		return -ENOMEM;
	}

	memcpy(new_addr, addr, len);

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (!na) {
		new_node->nodeid = nodeid;
		new_node->addr[0] = new_addr;
		new_node->addr_count = 1;
		list_add(&new_node->list, &dlm_node_addrs);
		spin_unlock(&dlm_node_addrs_spin);
		return 0;
	}

	if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
		spin_unlock(&dlm_node_addrs_spin);
		kfree(new_addr);
		kfree(new_node);
		return -ENOSPC;
	}

	na->addr[na->addr_count++] = new_addr;
	spin_unlock(&dlm_node_addrs_spin);
	kfree(new_node);
	return 0;
}

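/*
 * Socket callbacks. These run in softirq context with sk_callback_lock
 * held for reading, so they do no real work themselves: they look up the
 * connection through sk_user_data (sock2con) and queue the receive or
 * send work item onto the appropriate workqueue.
 */
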
/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (!con)
		goto out;

	clear_bit(SOCK_NOSPACE, &con->sock->flags);

	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
		con->sock->sk->sk_write_pending--;
		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
	}

	queue_work(send_workqueue, &con->swork);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
	if (test_bit(CF_CLOSE, &con->flags))
		return;
	queue_work(send_workqueue, &con->swork);
	cond_resched();
}

static void lowcomms_state_change(struct sock *sk)
{
	/* SCTP layer is not calling sk_data_ready when the connection
	 * is done, so we catch the signal through here. Also, it
	 * doesn't switch socket state when entering shutdown, so we
	 * skip the write in that case.
	 */
	if (sk->sk_shutdown) {
		if (sk->sk_shutdown == RCV_SHUTDOWN)
			lowcomms_data_ready(sk);
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		lowcomms_write_space(sk);
	}
}

int dlm_lowcomms_connect_node(int nodeid)
{
	struct connection *con;

	if (nodeid == dlm_our_nodeid())
		return 0;

	con = nodeid2con(nodeid, GFP_NOFS);
	if (!con)
		return -ENOMEM;
	lowcomms_connect_sock(con);
	return 0;
}

static void lowcomms_error_report(struct sock *sk)
{
	struct connection *con;
	struct sockaddr_storage saddr;
	void (*orig_report)(struct sock *) = NULL;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con == NULL)
		goto out;

	orig_report = listen_sock.sk_error_report;
	if (con->sock == NULL ||
	    kernel_getpeername(con->sock, (struct sockaddr *)&saddr) < 0) {
		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, dlm_config.ci_tcp_port,
				   sk->sk_err, sk->sk_err_soft);
	} else if (saddr.ss_family == AF_INET) {
		struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %pI4, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, &sin4->sin_addr.s_addr,
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %u.%u.%u.%u, "
				   "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, sin6->sin6_addr.s6_addr32[0],
				   sin6->sin6_addr.s6_addr32[1],
				   sin6->sin6_addr.s6_addr32[2],
				   sin6->sin6_addr.s6_addr32[3],
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
	if (orig_report)
		orig_report(sk);
}

/* Note: sk_callback_lock must be locked before calling this function. */
static void save_listen_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	listen_sock.sk_data_ready = sk->sk_data_ready;
	listen_sock.sk_state_change = sk->sk_state_change;
	listen_sock.sk_write_space = sk->sk_write_space;
	listen_sock.sk_error_report = sk->sk_error_report;
}

static void restore_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	sk->sk_data_ready = listen_sock.sk_data_ready;
	sk->sk_state_change = listen_sock.sk_state_change;
	sk->sk_write_space = listen_sock.sk_write_space;
	sk->sk_error_report = listen_sock.sk_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Make a socket active */
static void add_sock(struct socket *sock, struct connection *con)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	con->sock = sock;

	sk->sk_user_data = con;
	/* Install a data_ready callback */
	sk->sk_data_ready = lowcomms_data_ready;
	sk->sk_write_space = lowcomms_write_space;
	sk->sk_state_change = lowcomms_state_change;
	sk->sk_allocation = GFP_NOFS;
	sk->sk_error_report = lowcomms_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Add the port number to an IPv6 or 4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	saddr->ss_family = dlm_local_addr[0]->ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
}

/* Close a remote connection and tidy up. 'tx'/'rx' say whether the send
   and/or receive work may be cancelled, and 'and_other' also closes the
   paired othercon, if any. */
static void close_connection(struct connection *con, bool and_other,
			     bool tx, bool rx)
{
	bool closing = test_and_set_bit(CF_CLOSING, &con->flags);

	if (tx && !closing && cancel_work_sync(&con->swork)) {
		log_print("canceled swork for node %d", con->nodeid);
		clear_bit(CF_WRITE_PENDING, &con->flags);
	}
	if (rx && !closing && cancel_work_sync(&con->rwork)) {
		log_print("canceled rwork for node %d", con->nodeid);
		clear_bit(CF_READ_PENDING, &con->flags);
	}

	mutex_lock(&con->sock_mutex);
	if (con->sock) {
		restore_callbacks(con->sock);
		sock_release(con->sock);
		con->sock = NULL;
	}
	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false, true, true);
	}
	if (con->rx_page) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	con->retries = 0;
	mutex_unlock(&con->sock_mutex);
	clear_bit(CF_CLOSING, &con->flags);
}

/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	int ret = 0;
	struct msghdr msg = {};
	struct kvec iov[2];
	unsigned len;
	int r;
	int call_again_soon = 0;
	int nvec;

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}
	if (con->nodeid == 0) {
		ret = -EINVAL;
		goto out_close;
	}

	if (con->rx_page == NULL) {
		/*
		 * This doesn't need to be atomic, but I think it should
		 * improve performance if it is.
		 */
		con->rx_page = alloc_page(GFP_ATOMIC);
		if (con->rx_page == NULL)
			goto out_resched;
		cbuf_init(&con->cb, PAGE_SIZE);
	}

	/*
	 * iov[0] is the bit of the circular buffer between the current end
	 * point (cb.base + cb.len) and the end of the buffer.
	 */
	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
	iov[1].iov_len = 0;
	nvec = 1;

	/*
	 * iov[1] is the bit of the circular buffer between the start of the
	 * buffer and the start of the currently used section (cb.base)
	 */
	if (cbuf_data(&con->cb) >= con->cb.base) {
		iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
		iov[1].iov_len = con->cb.base;
		iov[1].iov_base = page_address(con->rx_page);
		nvec = 2;
	}
	len = iov[0].iov_len + iov[1].iov_len;
	iov_iter_kvec(&msg.msg_iter, READ, iov, nvec, len);

	r = ret = sock_recvmsg(con->sock, &msg, MSG_DONTWAIT | MSG_NOSIGNAL);
	if (ret <= 0)
		goto out_close;
	else if (ret == len)
		call_again_soon = 1;

	cbuf_add(&con->cb, ret);
	ret = dlm_process_incoming_buffer(con->nodeid,
					  page_address(con->rx_page),
					  con->cb.base, con->cb.len,
					  PAGE_SIZE);
	if (ret == -EBADMSG) {
		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
			  page_address(con->rx_page), con->cb.base,
			  con->cb.len, r);
	}
	if (ret < 0)
		goto out_close;
	cbuf_eat(&con->cb, ret);

	if (cbuf_empty(&con->cb) && !call_again_soon) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	if (call_again_soon)
		goto out_resched;
	mutex_unlock(&con->sock_mutex);
	return 0;

out_resched:
	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	mutex_unlock(&con->sock_mutex);
	return -EAGAIN;

out_close:
	mutex_unlock(&con->sock_mutex);
	if (ret != -EAGAIN) {
		close_connection(con, true, true, false);
		/* Reconnect when there is something to send */
	}
	/* Don't return success if we really got EOF */
	if (ret == 0)
		ret = -EAGAIN;

	return ret;
}

/* Listening socket is busy, accept a connection */
static int accept_from_sock(struct connection *con)
{
	int result;
	struct sockaddr_storage peeraddr;
	struct socket *newsock;
	int len;
	int nodeid;
	struct connection *newcon;
	struct connection *addcon;

	mutex_lock(&connections_lock);
	if (!dlm_allow_conn) {
		mutex_unlock(&connections_lock);
		return -1;
	}
	mutex_unlock(&connections_lock);

	mutex_lock_nested(&con->sock_mutex, 0);

	if (!con->sock) {
		mutex_unlock(&con->sock_mutex);
		return -ENOTCONN;
	}

	result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
	if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	len = newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr, 2);
	if (len < 0) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (addr_to_nodeid(&peeraddr, &nodeid)) {
		unsigned char *b = (unsigned char *)&peeraddr;
		log_print("connect from non cluster node");
		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
				     b, sizeof(struct sockaddr_storage));
		sock_release(newsock);
		mutex_unlock(&con->sock_mutex);
		return -1;
	}

	log_print("got connection from %d", nodeid);

	/* Check to see if we already have a connection to this node. This
	 * could happen if the two nodes initiate a connection at roughly
	 * the same time and the connections cross on the wire.
	 * In this case we store the incoming one in "othercon"
	 */
	newcon = nodeid2con(nodeid, GFP_NOFS);
	if (!newcon) {
		result = -ENOMEM;
		goto accept_err;
	}
	mutex_lock_nested(&newcon->sock_mutex, 1);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				result = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_LIST_HEAD(&othercon->writequeue);
			spin_lock_init(&othercon->writequeue_lock);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
		}
		mutex_lock_nested(&othercon->sock_mutex, 2);
		if (!othercon->sock) {
			newcon->othercon = othercon;
			add_sock(newsock, othercon);
			addcon = othercon;
			mutex_unlock(&othercon->sock_mutex);
		} else {
			printk("Extra connection from node %d attempted\n", nodeid);
			result = -EAGAIN;
			mutex_unlock(&othercon->sock_mutex);
			mutex_unlock(&newcon->sock_mutex);
			goto accept_err;
		}
	} else {
		newcon->rx_action = receive_from_sock;
		/* accept copies the sk after we've saved the callbacks, so we
		   don't want to save them a second time or comm errors will
		   result in calling sk_error_report recursively. */
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data
	 * between processing the accept and adding the socket
	 * to the read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	if (newsock)
		sock_release(newsock);

	if (result != -EAGAIN)
		log_print("error accepting connection from node: %d", result);
	return result;
}

static void free_entry(struct writequeue_entry *e)
{
	__free_page(e->page);
	kfree(e);
}

/*
 * writequeue_entry_complete - try to delete and free write queue entry
 * @e: write queue entry to try to delete
 * @completed: bytes completed
 *
 * writequeue_lock must be held.
 */
static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
{
	e->offset += completed;
	e->len -= completed;

	if (e->len == 0 && e->users == 0) {
		list_del(&e->list);
		free_entry(e);
	}
}

/*
 * sctp_bind_addrs - bind a SCTP socket to all our addresses
 */
static int sctp_bind_addrs(struct connection *con, uint16_t port)
{
	struct sockaddr_storage localaddr;
	int i, addr_len, result = 0;

	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, port, &addr_len);

		if (!i)
			result = kernel_bind(con->sock,
					     (struct sockaddr *)&localaddr,
					     addr_len);
		else
			result = kernel_setsockopt(con->sock, SOL_SCTP,
						   SCTP_SOCKOPT_BINDX_ADD,
						   (char *)&localaddr, addr_len);

		if (result < 0) {
			log_print("Can't bind to %d addr number %d, %d.\n",
				  port, i + 1, result);
			break;
		}
	}
	return result;
}

/* Initiate an SCTP association.
   This is a special case of send_to_sock() in that we don't yet have a
   peeled-off socket for this association, so we use the listening socket
   and add the primary IP address of the remote node.
 */
static void sctp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage daddr;
	int one = 1;
	int result;
	int addr_len;
	struct socket *sock;
	struct __kernel_sock_timeval tv = { .tv_sec = 5, .tv_usec = 0 };

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);

	/* Some odd races can cause double-connects, ignore them */
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	if (con->sock) {
		log_print("node %d already connected.", con->nodeid);
		goto out;
	}

	memset(&daddr, 0, sizeof(daddr));
	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out;
	}

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_SCTP, &sock);
	if (result < 0)
		goto socket_err;

	con->rx_action = receive_from_sock;
	con->connect_action = sctp_connect_to_sock;
	add_sock(sock, con);

	/* Bind to all addresses. */
	if (sctp_bind_addrs(con, 0))
		goto bind_err;

	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
			  sizeof(one));

	/*
	 * Make sock->ops->connect() return within the specified time,
	 * since the O_NONBLOCK argument to connect() does not work here.
	 * Afterwards we restore the default value of this attribute.
	 */
	kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW, (char *)&tv,
			  sizeof(tv));
	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
				    0);
	memset(&tv, 0, sizeof(tv));
	kernel_setsockopt(sock, SOL_SOCKET, SO_SNDTIMEO_NEW, (char *)&tv,
			  sizeof(tv));

	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

bind_err:
	con->sock = NULL;
	sock_release(sock);

socket_err:
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}

out:
	mutex_unlock(&con->sock_mutex);
}

/* Connect a new socket to its peer */
static void tcp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage saddr, src_addr;
	int addr_len;
	struct socket *sock = NULL;
	int one = 1;
	int result;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	/* Some odd races can cause double-connects, ignore them */
	if (con->sock)
		goto out;

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0)
		goto out_err;

	memset(&saddr, 0, sizeof(saddr));
	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out_err;
	}

	con->rx_action = receive_from_sock;
	con->connect_action = tcp_connect_to_sock;
	add_sock(sock, con);

	/* Bind to our cluster-known address connecting to avoid
	   routing problems */
	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
	make_sockaddr(&src_addr, 0, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
				 addr_len);
	if (result < 0) {
		log_print("could not bind for connect: %d", result);
		/* This *may* not indicate a critical error */
	}

	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
			  sizeof(one));

	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
				    O_NONBLOCK);
	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

out_err:
	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	} else if (sock) {
		sock_release(sock);
	}
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}
out:
	mutex_unlock(&con->sock_mutex);
	return;
}

static struct socket *tcp_create_listen_sock(struct connection *con,
					     struct sockaddr_storage *saddr)
{
	struct socket *sock = NULL;
	int result = 0;
	int one = 1;
	int addr_len;

	if (dlm_local_addr[0]->ss_family == AF_INET)
		addr_len = sizeof(struct sockaddr_in);
	else
		addr_len = sizeof(struct sockaddr_in6);

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0) {
		log_print("Can't create listening comms socket");
		goto create_out;
	}

	/* Turn off Nagle's algorithm */
	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
			  sizeof(one));

	result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
				   (char *)&one, sizeof(one));

	if (result < 0) {
		log_print("Failed to set SO_REUSEADDR on socket: %d", result);
	}
	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = con;
	save_listen_callbacks(sock);
	con->rx_action = accept_from_sock;
	con->connect_action = tcp_connect_to_sock;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	/* Bind to our port */
	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
	if (result < 0) {
		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		con->sock = NULL;
		goto create_out;
	}
	result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				   (char *)&one, sizeof(one));
	if (result < 0) {
		log_print("Set keepalive failed: %d", result);
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		goto create_out;
	}

create_out:
	return sock;
}

/* Get local addresses */
static void init_local(void)
{
	struct sockaddr_storage sas, *addr;
	int i;

	dlm_local_count = 0;
	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
		if (dlm_our_addr(&sas, i))
			break;

		addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
		if (!addr)
			break;
		dlm_local_addr[dlm_local_count++] = addr;
	}
}

/* Initialise SCTP socket and bind to all interfaces */
static int sctp_listen_for_all(void)
{
	struct socket *sock = NULL;
	int result = -EINVAL;
	struct connection *con = nodeid2con(0, GFP_NOFS);
	int bufsize = NEEDED_RMEM;
	int one = 1;

	if (!con)
		return -ENOMEM;

	log_print("Using SCTP for communications");

	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_SCTP, &sock);
	if (result < 0) {
		log_print("Can't create comms socket, check SCTP is loaded");
		goto out;
	}

	result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
				   (char *)&bufsize, sizeof(bufsize));
	if (result)
		log_print("Error increasing buffer space on socket %d", result);

	result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
				   sizeof(one));
	if (result < 0)
		log_print("Could not set SCTP NODELAY error %d\n", result);

	write_lock_bh(&sock->sk->sk_callback_lock);
	/* Init con struct */
	sock->sk->sk_user_data = con;
	save_listen_callbacks(sock);
	con->sock = sock;
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->rx_action = accept_from_sock;
	con->connect_action = sctp_connect_to_sock;

	write_unlock_bh(&sock->sk->sk_callback_lock);

	/* Bind to all addresses. */
	if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
		goto create_delsock;

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't set socket listening");
		goto create_delsock;
	}

	return 0;

create_delsock:
	sock_release(sock);
	con->sock = NULL;
out:
	return result;
}

static int tcp_listen_for_all(void)
{
	struct socket *sock = NULL;
	struct connection *con = nodeid2con(0, GFP_NOFS);
	int result = -EINVAL;

	if (!con)
		return -ENOMEM;

	/* We don't support multi-homed hosts */
	if (dlm_local_addr[1] != NULL) {
		log_print("TCP protocol can't handle multi-homed hosts, "
			  "try SCTP");
		return -EINVAL;
	}

	log_print("Using TCP for communications");

	sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
	if (sock) {
		add_sock(sock, con);
		result = 0;
	} else {
		result = -EADDRINUSE;
	}

	return result;
}

static struct writequeue_entry *new_writequeue_entry(struct connection *con,
						     gfp_t allocation)
{
	struct writequeue_entry *entry;

	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
	if (!entry)
		return NULL;

	entry->page = alloc_page(allocation);
	if (!entry->page) {
		kfree(entry);
		return NULL;
	}

	entry->offset = 0;
	entry->len = 0;
	entry->end = 0;
	entry->users = 0;
	entry->con = con;

	return entry;
}

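/*
 * Reserve 'len' bytes in the write queue of 'nodeid'. Reuses the tail
 * page of the queue when the request still fits, otherwise appends a new
 * writequeue_entry. On success *ppc points at the reserved area and the
 * returned handle must be passed to dlm_lowcomms_commit_buffer() once the
 * caller has filled it in.
 */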
David Teigland617e82e2007-04-26 13:46:49 -05001306void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001307{
1308 struct connection *con;
1309 struct writequeue_entry *e;
1310 int offset = 0;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001311
1312 con = nodeid2con(nodeid, allocation);
1313 if (!con)
1314 return NULL;
1315
1316 spin_lock(&con->writequeue_lock);
1317 e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
1318 if ((&e->list == &con->writequeue) ||
Kirill A. Shutemov09cbfea2016-04-01 15:29:47 +03001319 (PAGE_SIZE - e->end < len)) {
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001320 e = NULL;
1321 } else {
1322 offset = e->end;
1323 e->end += len;
Wei Yongjuneeee2b52012-10-18 22:57:19 +08001324 e->users++;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001325 }
1326 spin_unlock(&con->writequeue_lock);
1327
1328 if (e) {
1329 got_one:
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001330 *ppc = page_address(e->page) + offset;
1331 return e;
1332 }
1333
1334 e = new_writequeue_entry(con, allocation);
1335 if (e) {
1336 spin_lock(&con->writequeue_lock);
1337 offset = e->end;
1338 e->end += len;
Wei Yongjuneeee2b52012-10-18 22:57:19 +08001339 e->users++;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001340 list_add_tail(&e->list, &con->writequeue);
1341 spin_unlock(&con->writequeue_lock);
1342 goto got_one;
1343 }
1344 return NULL;
1345}
1346
1347void dlm_lowcomms_commit_buffer(void *mh)
1348{
1349 struct writequeue_entry *e = (struct writequeue_entry *)mh;
1350 struct connection *con = e->con;
1351 int users;
1352
1353 spin_lock(&con->writequeue_lock);
1354 users = --e->users;
1355 if (users)
1356 goto out;
1357 e->len = e->end - e->offset;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001358 spin_unlock(&con->writequeue_lock);
1359
Bob Peterson01da24d2017-09-12 08:55:14 +00001360 queue_work(send_workqueue, &con->swork);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001361 return;
1362
1363out:
1364 spin_unlock(&con->writequeue_lock);
1365 return;
1366}
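/*
 * Illustrative sketch (not part of the original file): how a caller such
 * as the midcomms layer is expected to pair the two functions above -
 * reserve space in the node's write queue, fill it, then commit it.
 * example_send_message() itself is an assumption made for the example.
 */
static void example_send_message(int nodeid, int len)
{
	void *mh;
	char *buf;

	/* Reserve 'len' bytes bound for 'nodeid'; 'buf' points into the page */
	mh = dlm_lowcomms_get_buffer(nodeid, len, GFP_NOFS, &buf);
	if (!mh)
		return;

	memset(buf, 0, len);		/* build the real message here */

	/* Drop our reference; the last committer queues the send work */
	dlm_lowcomms_commit_buffer(mh);
}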
1367
1368/* Send a message */
1369static void send_to_sock(struct connection *con)
1370{
1371 int ret = 0;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001372 const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
1373 struct writequeue_entry *e;
1374 int len, offset;
Bob Petersonf92c8dd2010-11-12 11:15:20 -06001375 int count = 0;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001376
1377 mutex_lock(&con->sock_mutex);
1378 if (con->sock == NULL)
1379 goto out_connect;
1380
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001381 spin_lock(&con->writequeue_lock);
1382 for (;;) {
1383 e = list_entry(con->writequeue.next, struct writequeue_entry,
1384 list);
1385 if ((struct list_head *) e == &con->writequeue)
1386 break;
1387
1388 len = e->len;
1389 offset = e->offset;
1390 BUG_ON(len == 0 && e->users == 0);
1391 spin_unlock(&con->writequeue_lock);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001392
1393 ret = 0;
1394 if (len) {
Paolo Bonzini1329e3f2009-08-24 13:18:04 -05001395 ret = kernel_sendpage(con->sock, e->page, offset, len,
1396 msg_flags);
Patrick Caulfieldd66f8272007-09-14 08:49:21 +01001397 if (ret == -EAGAIN || ret == 0) {
David Millerb36930d2010-11-10 21:56:39 -08001398 if (ret == -EAGAIN &&
Eric Dumazet9cd3e072015-11-29 20:03:10 -08001399 test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
David Millerb36930d2010-11-10 21:56:39 -08001400 !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
1401 /* Notify TCP that we're limited by the
1402 * application window size.
1403 */
1404 set_bit(SOCK_NOSPACE, &con->sock->flags);
1405 con->sock->sk->sk_write_pending++;
1406 }
Patrick Caulfieldd66f8272007-09-14 08:49:21 +01001407 cond_resched();
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001408 goto out;
Ying Xue9c5bef52012-08-13 14:29:55 +08001409 } else if (ret < 0)
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001410 goto send_error;
Patrick Caulfieldd66f8272007-09-14 08:49:21 +01001411 }
Bob Petersonf92c8dd2010-11-12 11:15:20 -06001412
1413 /* Don't starve people filling buffers */
1414 if (++count >= MAX_SEND_MSG_COUNT) {
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001415 cond_resched();
Bob Petersonf92c8dd2010-11-12 11:15:20 -06001416 count = 0;
1417 }
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001418
1419 spin_lock(&con->writequeue_lock);
Mike Christie5d689872013-06-14 04:56:13 -05001420 writequeue_entry_complete(e, ret);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001421 }
1422 spin_unlock(&con->writequeue_lock);
1423out:
1424 mutex_unlock(&con->sock_mutex);
1425 return;
1426
1427send_error:
1428 mutex_unlock(&con->sock_mutex);
tsutomu.owa@toshiba.co.jpc553e172017-09-12 08:56:15 +00001429 close_connection(con, true, false, true);
	/* Requeue the send work. When the send worker runs again it will try
	 * to make a new connection, then call this function again.
	 */
1432 queue_work(send_workqueue, &con->swork);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001433 return;
1434
1435out_connect:
1436 mutex_unlock(&con->sock_mutex);
Bob Peterson01da24d2017-09-12 08:55:14 +00001437 queue_work(send_workqueue, &con->swork);
1438 cond_resched();
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001439}
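/*
 * send_to_sock() above relies on writequeue_entry_complete(), defined
 * earlier in the file.  A minimal sketch of its expected effect,
 * reconstructed from the call site (an assumption, not a verbatim copy):
 * consume 'completed' bytes and free the entry once fully sent and no
 * longer referenced.  It runs with con->writequeue_lock already held.
 */
static void example_entry_complete(struct writequeue_entry *e, int completed)
{
	e->offset += completed;	/* skip the bytes kernel_sendpage() took */
	e->len -= completed;

	if (e->len == 0 && e->users == 0) {
		list_del(&e->list);
		free_entry(e);
	}
}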
1440
1441static void clean_one_writequeue(struct connection *con)
1442{
Christine Caulfield5e9ccc32009-01-28 12:57:40 -06001443 struct writequeue_entry *e, *safe;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001444
1445 spin_lock(&con->writequeue_lock);
Christine Caulfield5e9ccc32009-01-28 12:57:40 -06001446 list_for_each_entry_safe(e, safe, &con->writequeue, list) {
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001447 list_del(&e->list);
1448 free_entry(e);
1449 }
1450 spin_unlock(&con->writequeue_lock);
1451}
1452
/* Called from recovery when it knows that a node has left the cluster */
1455int dlm_lowcomms_close(int nodeid)
1456{
1457 struct connection *con;
David Teigland36b71a82012-07-26 12:44:30 -05001458 struct dlm_node_addr *na;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001459
1460 log_print("closing connection to node %d", nodeid);
1461 con = nodeid2con(nodeid, 0);
1462 if (con) {
Lars Marowsky-Bree063c4c92009-08-11 16:18:23 -05001463 set_bit(CF_CLOSE, &con->flags);
Marcelo Ricardo Leitner0d737a82015-08-11 19:22:21 -03001464 close_connection(con, true, true, true);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001465 clean_one_writequeue(con);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001466 }
David Teigland36b71a82012-07-26 12:44:30 -05001467
1468 spin_lock(&dlm_node_addrs_spin);
1469 na = find_node_addr(nodeid);
1470 if (na) {
1471 list_del(&na->list);
1472 while (na->addr_count--)
1473 kfree(na->addr[na->addr_count]);
1474 kfree(na);
1475 }
1476 spin_unlock(&dlm_node_addrs_spin);
1477
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001478 return 0;
1479}
1480
1481/* Receive workqueue function */
1482static void process_recv_sockets(struct work_struct *work)
1483{
1484 struct connection *con = container_of(work, struct connection, rwork);
1485 int err;
1486
1487 clear_bit(CF_READ_PENDING, &con->flags);
1488 do {
1489 err = con->rx_action(con);
1490 } while (!err);
1491}
1492
1493/* Send workqueue function */
1494static void process_send_sockets(struct work_struct *work)
1495{
1496 struct connection *con = container_of(work, struct connection, swork);
1497
tsutomu.owa@toshiba.co.jp8a4abb02017-09-12 09:01:16 +00001498 clear_bit(CF_WRITE_PENDING, &con->flags);
	if (con->sock == NULL) /* unlocked check; rechecked under sock_mutex */
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001500 con->connect_action(con);
Bob Peterson01da24d2017-09-12 08:55:14 +00001501 if (!list_empty(&con->writequeue))
Lars Marowsky-Bree063c4c92009-08-11 16:18:23 -05001502 send_to_sock(con);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001503}
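/*
 * Illustrative sketch (not part of the original file): how the two work
 * functions above are typically kicked from the socket callbacks that
 * add_sock() installs earlier in the file.  example_write_space() is an
 * assumption standing in for the real sk_write_space hook.
 */
static void example_write_space(struct sock *sk)
{
	struct connection *con = sk->sk_user_data;

	/* More socket buffer space: let the send worker try again */
	if (con)
		queue_work(send_workqueue, &con->swork);
}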
1504
1506/* Discard all entries on the write queues */
1507static void clean_writequeues(void)
1508{
Christine Caulfield5e9ccc32009-01-28 12:57:40 -06001509 foreach_conn(clean_one_writequeue);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001510}
1511
1512static void work_stop(void)
1513{
David Windsorb3555162019-04-02 08:37:10 -04001514 if (recv_workqueue)
1515 destroy_workqueue(recv_workqueue);
1516 if (send_workqueue)
1517 destroy_workqueue(send_workqueue);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001518}
1519
1520static int work_start(void)
1521{
David Teiglande43f0552011-03-10 13:22:34 -06001522 recv_workqueue = alloc_workqueue("dlm_recv",
1523 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
Namhyung Kimb9d41052010-12-13 13:42:24 -06001524 if (!recv_workqueue) {
1525 log_print("can't start dlm_recv");
1526 return -ENOMEM;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001527 }
1528
David Teiglande43f0552011-03-10 13:22:34 -06001529 send_workqueue = alloc_workqueue("dlm_send",
1530 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
Namhyung Kimb9d41052010-12-13 13:42:24 -06001531 if (!send_workqueue) {
1532 log_print("can't start dlm_send");
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001533 destroy_workqueue(recv_workqueue);
Namhyung Kimb9d41052010-12-13 13:42:24 -06001534 return -ENOMEM;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001535 }
1536
1537 return 0;
1538}
1539
tsutomu.owa@toshiba.co.jpf0fb83c2017-09-12 08:55:40 +00001540static void _stop_conn(struct connection *con, bool and_other)
Christine Caulfield5e9ccc32009-01-28 12:57:40 -06001541{
tsutomu.owa@toshiba.co.jpf0fb83c2017-09-12 08:55:40 +00001542 mutex_lock(&con->sock_mutex);
tsutomu.owa@toshiba.co.jp173a31f2017-09-12 09:01:24 +00001543 set_bit(CF_CLOSE, &con->flags);
tsutomu.owa@toshiba.co.jpf0fb83c2017-09-12 08:55:40 +00001544 set_bit(CF_READ_PENDING, &con->flags);
tsutomu.owa@toshiba.co.jp8a4abb02017-09-12 09:01:16 +00001545 set_bit(CF_WRITE_PENDING, &con->flags);
tsutomu.owa@toshiba.co.jp93eaade2017-09-12 09:01:55 +00001546 if (con->sock && con->sock->sk) {
1547 write_lock_bh(&con->sock->sk->sk_callback_lock);
Christine Caulfield5e9ccc32009-01-28 12:57:40 -06001548 con->sock->sk->sk_user_data = NULL;
tsutomu.owa@toshiba.co.jp93eaade2017-09-12 09:01:55 +00001549 write_unlock_bh(&con->sock->sk->sk_callback_lock);
1550 }
tsutomu.owa@toshiba.co.jpf0fb83c2017-09-12 08:55:40 +00001551 if (con->othercon && and_other)
1552 _stop_conn(con->othercon, false);
1553 mutex_unlock(&con->sock_mutex);
1554}
1555
1556static void stop_conn(struct connection *con)
1557{
1558 _stop_conn(con, true);
Christine Caulfield5e9ccc32009-01-28 12:57:40 -06001559}
1560
1561static void free_conn(struct connection *con)
1562{
Marcelo Ricardo Leitner0d737a82015-08-11 19:22:21 -03001563 close_connection(con, true, true, true);
Christine Caulfield5e9ccc32009-01-28 12:57:40 -06001564 if (con->othercon)
1565 kmem_cache_free(con_cache, con->othercon);
1566 hlist_del(&con->list);
1567 kmem_cache_free(con_cache, con);
1568}
1569
tsutomu.owa@toshiba.co.jpf0fb83c2017-09-12 08:55:40 +00001570static void work_flush(void)
1571{
1572 int ok;
1573 int i;
1574 struct hlist_node *n;
1575 struct connection *con;
1576
David Windsorb3555162019-04-02 08:37:10 -04001577 if (recv_workqueue)
1578 flush_workqueue(recv_workqueue);
1579 if (send_workqueue)
1580 flush_workqueue(send_workqueue);
tsutomu.owa@toshiba.co.jpf0fb83c2017-09-12 08:55:40 +00001581 do {
1582 ok = 1;
1583 foreach_conn(stop_conn);
David Windsorb3555162019-04-02 08:37:10 -04001584 if (recv_workqueue)
1585 flush_workqueue(recv_workqueue);
1586 if (send_workqueue)
1587 flush_workqueue(send_workqueue);
tsutomu.owa@toshiba.co.jpf0fb83c2017-09-12 08:55:40 +00001588 for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
1589 hlist_for_each_entry_safe(con, n,
1590 &connection_hash[i], list) {
1591 ok &= test_bit(CF_READ_PENDING, &con->flags);
tsutomu.owa@toshiba.co.jp8a4abb02017-09-12 09:01:16 +00001592 ok &= test_bit(CF_WRITE_PENDING, &con->flags);
1593 if (con->othercon) {
tsutomu.owa@toshiba.co.jpf0fb83c2017-09-12 08:55:40 +00001594 ok &= test_bit(CF_READ_PENDING,
1595 &con->othercon->flags);
tsutomu.owa@toshiba.co.jp8a4abb02017-09-12 09:01:16 +00001596 ok &= test_bit(CF_WRITE_PENDING,
1597 &con->othercon->flags);
1598 }
tsutomu.owa@toshiba.co.jpf0fb83c2017-09-12 08:55:40 +00001599 }
1600 }
1601 } while (!ok);
1602}
1603
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001604void dlm_lowcomms_stop(void)
1605{
	/* Set all the flags to prevent any socket activity */
Matthias Kaehlcke7a936ce2008-05-12 10:04:51 -05001609 mutex_lock(&connections_lock);
David Teigland513ef592012-03-30 11:46:08 -05001610 dlm_allow_conn = 0;
tsutomu.owa@toshiba.co.jpf0fb83c2017-09-12 08:55:40 +00001611 mutex_unlock(&connections_lock);
1612 work_flush();
Marcelo Ricardo Leitner3a8db792016-10-08 10:14:37 -03001613 clean_writequeues();
1614 foreach_conn(free_conn);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001615 work_stop();
1616
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001617 kmem_cache_destroy(con_cache);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001618}
1619
1620int dlm_lowcomms_start(void)
1621{
1622 int error = -EINVAL;
1623 struct connection *con;
Christine Caulfield5e9ccc32009-01-28 12:57:40 -06001624 int i;
1625
1626 for (i = 0; i < CONN_HASH_SIZE; i++)
1627 INIT_HLIST_HEAD(&connection_hash[i]);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001628
1629 init_local();
1630 if (!dlm_local_count) {
David Teigland617e82e2007-04-26 13:46:49 -05001631 error = -ENOTCONN;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001632 log_print("no local IP address has been set");
David Teigland513ef592012-03-30 11:46:08 -05001633 goto fail;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001634 }
1635
1636 error = -ENOMEM;
1637 con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
1638 __alignof__(struct connection), 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09001639 NULL);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001640 if (!con_cache)
David Teigland513ef592012-03-30 11:46:08 -05001641 goto fail;
1642
1643 error = work_start();
1644 if (error)
1645 goto fail_destroy;
1646
1647 dlm_allow_conn = 1;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001648
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001649 /* Start listening */
1650 if (dlm_config.ci_protocol == 0)
1651 error = tcp_listen_for_all();
1652 else
1653 error = sctp_listen_for_all();
1654 if (error)
1655 goto fail_unlisten;
1656
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001657 return 0;
1658
1659fail_unlisten:
David Teigland513ef592012-03-30 11:46:08 -05001660 dlm_allow_conn = 0;
	con = nodeid2con(0, 0);
1662 if (con) {
Marcelo Ricardo Leitner0d737a82015-08-11 19:22:21 -03001663 close_connection(con, false, true, true);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001664 kmem_cache_free(con_cache, con);
1665 }
David Teigland513ef592012-03-30 11:46:08 -05001666fail_destroy:
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001667 kmem_cache_destroy(con_cache);
David Teigland513ef592012-03-30 11:46:08 -05001668fail:
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001669 return error;
1670}
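/*
 * Illustrative sketch (not part of the original file): the lifecycle the
 * layer above is expected to drive.  example_bringup() is an assumption
 * made for the example; dlm_config.ci_protocol (0 = TCP, 1 = SCTP) must
 * already be set the same on every node before this point.
 */
static int example_bringup(void)
{
	int error;

	error = dlm_lowcomms_start();	/* listen and allow connections */
	if (error)
		return error;

	/* ... cluster traffic; dlm_lowcomms_close(nodeid) as nodes leave ... */

	dlm_lowcomms_stop();		/* quiesce work and free connections */
	return 0;
}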
David Teigland36b71a82012-07-26 12:44:30 -05001671
1672void dlm_lowcomms_exit(void)
1673{
1674 struct dlm_node_addr *na, *safe;
1675
1676 spin_lock(&dlm_node_addrs_spin);
1677 list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
1678 list_del(&na->list);
1679 while (na->addr_count--)
1680 kfree(na->addr[na->addr_count]);
1681 kfree(na);
1682 }
1683 spin_unlock(&dlm_node_addrs_spin);
1684}