/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2009 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

/*
 * lowcomms.c
 *
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two kernel threads that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send thread which does all the setting up of connections
 * to remote nodes and the sending of data. Threads are not allowed
 * to send their own data because it may cause them to wait in times
 * of high load. Also, this way, the sending thread can collect together
 * messages bound for one node and send them in one block.
 *
 * lowcomms will choose to use either TCP or SCTP as its transport layer
 * depending on the configuration variable 'protocol'. This should be set
 * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
 * cluster-wide mechanism as it must be the same on all nodes of the cluster
 * for the DLM to function.
 *
 */

#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <linux/slab.h>
#include <net/sctp/sctp.h>
#include <net/ipv6.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

#define NEEDED_RMEM (4*1024*1024)
#define CONN_HASH_SIZE 32

/* Number of messages to send before rescheduling */
#define MAX_SEND_MSG_COUNT 25

struct cbuf {
	unsigned int base;
	unsigned int len;
	unsigned int mask;
};

static void cbuf_add(struct cbuf *cb, int n)
{
	cb->len += n;
}

static int cbuf_data(struct cbuf *cb)
{
	return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
	cb->base = cb->len = 0;
	cb->mask = size-1;
}

static void cbuf_eat(struct cbuf *cb, int n)
{
	cb->len -= n;
	cb->base += n;
	cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
	return cb->len == 0;
}
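
/*
 * The cbuf helpers above implement a power-of-two circular buffer
 * index: 'base' is where unconsumed data starts, 'len' is how much is
 * buffered, and 'mask' (size-1) turns the wrap-around into a cheap
 * bitwise AND.  For example, with a 4096-byte buffer (mask 0xfff),
 * base = 4000 and len = 200, cbuf_data() returns
 * (4000 + 200) & 0xfff = 104, i.e. the write point has wrapped back
 * to near the start of the page.
 */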

struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;
	unsigned long flags;
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_INIT_PENDING 4
#define CF_IS_OTHERCON 5
#define CF_CLOSE 6
#define CF_APP_LIMITED 7
#define CF_CLOSING 8
	struct list_head writequeue;  /* List of outgoing writequeue_entries */
	spinlock_t writequeue_lock;
	int (*rx_action) (struct connection *);	/* What to do when active */
	void (*connect_action) (struct connection *);	/* What to do to connect */
	struct page *rx_page;
	struct cbuf cb;
	int retries;
#define MAX_CONNECT_RETRIES 3
	struct hlist_node list;
	struct connection *othercon;
	struct work_struct rwork; /* Receive workqueue */
	struct work_struct swork; /* Send workqueue */
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)

/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;
	struct page *page;
	int offset;
	int len;
	int end;
	int users;
	struct connection *con;
};

struct dlm_node_addr {
	struct list_head list;
	int nodeid;
	int addr_count;
	int curr_addr_index;
	struct sockaddr_storage *addr[DLM_MAX_ADDR_COUNT];
};

static struct listen_sock_callbacks {
	void (*sk_error_report)(struct sock *);
	void (*sk_data_ready)(struct sock *);
	void (*sk_state_change)(struct sock *);
	void (*sk_write_space)(struct sock *);
} listen_sock;

static LIST_HEAD(dlm_node_addrs);
static DEFINE_SPINLOCK(dlm_node_addrs_spin);

static struct sockaddr_storage *dlm_local_addr[DLM_MAX_ADDR_COUNT];
static int dlm_local_count;
static int dlm_allow_conn;

/* Work queues */
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_MUTEX(connections_lock);
static struct kmem_cache *con_cache;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);


/* This is deliberately very simple because most clusters have simple
   sequential nodeids, so we should be able to go straight to a connection
   struct in the array */
static inline int nodeid_hash(int nodeid)
{
	return nodeid & (CONN_HASH_SIZE-1);
}
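
/* e.g. with CONN_HASH_SIZE of 32, nodeids 5 and 37 both land in bucket 5 */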

static struct connection *__find_con(int nodeid)
{
	int r;
	struct connection *con;

	r = nodeid_hash(nodeid);

	hlist_for_each_entry(con, &connection_hash[r], list) {
		if (con->nodeid == nodeid)
			return con;
	}
	return NULL;
}

/*
 * If 'alloc' is zero then we don't attempt to create a new
 * connection structure for this node.
 */
static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
	struct connection *con = NULL;
	int r;

	con = __find_con(nodeid);
	if (con || !alloc)
		return con;

	con = kmem_cache_zalloc(con_cache, alloc);
	if (!con)
		return NULL;

	r = nodeid_hash(nodeid);
	hlist_add_head(&con->list, &connection_hash[r]);

	con->nodeid = nodeid;
	mutex_init(&con->sock_mutex);
	INIT_LIST_HEAD(&con->writequeue);
	spin_lock_init(&con->writequeue_lock);
	INIT_WORK(&con->swork, process_send_sockets);
	INIT_WORK(&con->rwork, process_recv_sockets);

	/* Setup action pointers for child sockets */
	if (con->nodeid) {
		struct connection *zerocon = __find_con(0);

		con->connect_action = zerocon->connect_action;
		if (!con->rx_action)
			con->rx_action = zerocon->rx_action;
	}

	return con;
}

/* Loop round all connections */
static void foreach_conn(void (*conn_func)(struct connection *c))
{
	int i;
	struct hlist_node *n;
	struct connection *con;

	for (i = 0; i < CONN_HASH_SIZE; i++) {
		hlist_for_each_entry_safe(con, n, &connection_hash[i], list)
			conn_func(con);
	}
}

static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
	struct connection *con;

	mutex_lock(&connections_lock);
	con = __nodeid2con(nodeid, allocation);
	mutex_unlock(&connections_lock);

	return con;
}

static struct dlm_node_addr *find_node_addr(int nodeid)
{
	struct dlm_node_addr *na;

	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (na->nodeid == nodeid)
			return na;
	}
	return NULL;
}

static int addr_compare(struct sockaddr_storage *x, struct sockaddr_storage *y)
{
	switch (x->ss_family) {
	case AF_INET: {
		struct sockaddr_in *sinx = (struct sockaddr_in *)x;
		struct sockaddr_in *siny = (struct sockaddr_in *)y;
		if (sinx->sin_addr.s_addr != siny->sin_addr.s_addr)
			return 0;
		if (sinx->sin_port != siny->sin_port)
			return 0;
		break;
	}
	case AF_INET6: {
		struct sockaddr_in6 *sinx = (struct sockaddr_in6 *)x;
		struct sockaddr_in6 *siny = (struct sockaddr_in6 *)y;
		if (!ipv6_addr_equal(&sinx->sin6_addr, &siny->sin6_addr))
			return 0;
		if (sinx->sin6_port != siny->sin6_port)
			return 0;
		break;
	}
	default:
		return 0;
	}
	return 1;
}
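
/*
 * Both the address and the port must match; the ports are compared in
 * network byte order, which is fine for a pure equality test.
 */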

static int nodeid_to_addr(int nodeid, struct sockaddr_storage *sas_out,
			  struct sockaddr *sa_out, bool try_new_addr)
{
	struct sockaddr_storage sas;
	struct dlm_node_addr *na;

	if (!dlm_local_count)
		return -1;

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na && na->addr_count) {
		memcpy(&sas, na->addr[na->curr_addr_index],
		       sizeof(struct sockaddr_storage));

		if (try_new_addr) {
			na->curr_addr_index++;
			if (na->curr_addr_index == na->addr_count)
				na->curr_addr_index = 0;
		}
	}
	spin_unlock(&dlm_node_addrs_spin);

	if (!na)
		return -EEXIST;

	if (!na->addr_count)
		return -ENOENT;

	if (sas_out)
		memcpy(sas_out, &sas, sizeof(struct sockaddr_storage));

	if (!sa_out)
		return 0;

	if (dlm_local_addr[0]->ss_family == AF_INET) {
		struct sockaddr_in *in4 = (struct sockaddr_in *) &sas;
		struct sockaddr_in *ret4 = (struct sockaddr_in *) sa_out;
		ret4->sin_addr.s_addr = in4->sin_addr.s_addr;
	} else {
		struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &sas;
		struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) sa_out;
		ret6->sin6_addr = in6->sin6_addr;
	}

	return 0;
}
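
/*
 * When try_new_addr is set, curr_addr_index above rotates through the
 * node's known addresses, so successive (SCTP) connection attempts try
 * each configured address in turn rather than retrying just one.
 */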

static int addr_to_nodeid(struct sockaddr_storage *addr, int *nodeid)
{
	struct dlm_node_addr *na;
	int rv = -EEXIST;
	int addr_i;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry(na, &dlm_node_addrs, list) {
		if (!na->addr_count)
			continue;

		for (addr_i = 0; addr_i < na->addr_count; addr_i++) {
			if (addr_compare(na->addr[addr_i], addr)) {
				*nodeid = na->nodeid;
				rv = 0;
				goto unlock;
			}
		}
	}
unlock:
	spin_unlock(&dlm_node_addrs_spin);
	return rv;
}

int dlm_lowcomms_addr(int nodeid, struct sockaddr_storage *addr, int len)
{
	struct sockaddr_storage *new_addr;
	struct dlm_node_addr *new_node, *na;

	new_node = kzalloc(sizeof(struct dlm_node_addr), GFP_NOFS);
	if (!new_node)
		return -ENOMEM;

	new_addr = kzalloc(sizeof(struct sockaddr_storage), GFP_NOFS);
	if (!new_addr) {
		kfree(new_node);
		return -ENOMEM;
	}

	memcpy(new_addr, addr, len);

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (!na) {
		new_node->nodeid = nodeid;
		new_node->addr[0] = new_addr;
		new_node->addr_count = 1;
		list_add(&new_node->list, &dlm_node_addrs);
		spin_unlock(&dlm_node_addrs_spin);
		return 0;
	}

	if (na->addr_count >= DLM_MAX_ADDR_COUNT) {
		spin_unlock(&dlm_node_addrs_spin);
		kfree(new_addr);
		kfree(new_node);
		return -ENOSPC;
	}

	na->addr[na->addr_count++] = new_addr;
	spin_unlock(&dlm_node_addrs_spin);
	kfree(new_node);
	return 0;
}
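
/*
 * dlm_lowcomms_addr() is how the cluster infrastructure (typically the
 * configfs interface, driven by userspace) tells this layer which
 * addresses belong to which nodeid; a node may register several
 * addresses for SCTP multi-homing, up to DLM_MAX_ADDR_COUNT.
 */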

/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con && !test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	read_unlock_bh(&sk->sk_callback_lock);
}

static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (!con)
		goto out;

	clear_bit(SOCK_NOSPACE, &con->sock->flags);

	if (test_and_clear_bit(CF_APP_LIMITED, &con->flags)) {
		con->sock->sk->sk_write_pending--;
		clear_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags);
	}

	queue_work(send_workqueue, &con->swork);
out:
	read_unlock_bh(&sk->sk_callback_lock);
}

static inline void lowcomms_connect_sock(struct connection *con)
{
	if (test_bit(CF_CLOSE, &con->flags))
		return;
	queue_work(send_workqueue, &con->swork);
	cond_resched();
}

static void lowcomms_state_change(struct sock *sk)
{
	/* SCTP layer is not calling sk_data_ready when the connection
	 * is done, so we catch the signal through here. Also, it
	 * doesn't switch socket state when entering shutdown, so we
	 * skip the write in that case.
	 */
	if (sk->sk_shutdown) {
		if (sk->sk_shutdown == RCV_SHUTDOWN)
			lowcomms_data_ready(sk);
	} else if (sk->sk_state == TCP_ESTABLISHED) {
		lowcomms_write_space(sk);
	}
}

int dlm_lowcomms_connect_node(int nodeid)
{
	struct connection *con;

	if (nodeid == dlm_our_nodeid())
		return 0;

	con = nodeid2con(nodeid, GFP_NOFS);
	if (!con)
		return -ENOMEM;
	lowcomms_connect_sock(con);
	return 0;
}

static void lowcomms_error_report(struct sock *sk)
{
	struct connection *con;
	struct sockaddr_storage saddr;
	int buflen;
	void (*orig_report)(struct sock *) = NULL;

	read_lock_bh(&sk->sk_callback_lock);
	con = sock2con(sk);
	if (con == NULL)
		goto out;

	orig_report = listen_sock.sk_error_report;
	if (con->sock == NULL ||
	    kernel_getpeername(con->sock, (struct sockaddr *)&saddr, &buflen)) {
		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, dlm_config.ci_tcp_port,
				   sk->sk_err, sk->sk_err_soft);
	} else if (saddr.ss_family == AF_INET) {
		struct sockaddr_in *sin4 = (struct sockaddr_in *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %pI4, port %d, "
				   "sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, &sin4->sin_addr.s_addr,
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	} else {
		struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&saddr;

		printk_ratelimited(KERN_ERR "dlm: node %d: socket error "
				   "sending to node %d at %u.%u.%u.%u, "
				   "port %d, sk_err=%d/%d\n", dlm_our_nodeid(),
				   con->nodeid, sin6->sin6_addr.s6_addr32[0],
				   sin6->sin6_addr.s6_addr32[1],
				   sin6->sin6_addr.s6_addr32[2],
				   sin6->sin6_addr.s6_addr32[3],
				   dlm_config.ci_tcp_port, sk->sk_err,
				   sk->sk_err_soft);
	}
out:
	read_unlock_bh(&sk->sk_callback_lock);
	if (orig_report)
		orig_report(sk);
}

/* Note: sk_callback_lock must be locked before calling this function. */
static void save_listen_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	listen_sock.sk_data_ready = sk->sk_data_ready;
	listen_sock.sk_state_change = sk->sk_state_change;
	listen_sock.sk_write_space = sk->sk_write_space;
	listen_sock.sk_error_report = sk->sk_error_report;
}

static void restore_callbacks(struct socket *sock)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	sk->sk_user_data = NULL;
	sk->sk_data_ready = listen_sock.sk_data_ready;
	sk->sk_state_change = listen_sock.sk_state_change;
	sk->sk_write_space = listen_sock.sk_write_space;
	sk->sk_error_report = listen_sock.sk_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Make a socket active */
static void add_sock(struct socket *sock, struct connection *con)
{
	struct sock *sk = sock->sk;

	write_lock_bh(&sk->sk_callback_lock);
	con->sock = sock;

	sk->sk_user_data = con;
	/* Install a data_ready callback */
	sk->sk_data_ready = lowcomms_data_ready;
	sk->sk_write_space = lowcomms_write_space;
	sk->sk_state_change = lowcomms_state_change;
	sk->sk_allocation = GFP_NOFS;
	sk->sk_error_report = lowcomms_error_report;
	write_unlock_bh(&sk->sk_callback_lock);
}

/* Add the port number to an IPv4 or IPv6 sockaddr and return the
   address length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	saddr->ss_family = dlm_local_addr[0]->ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
		memset(&in4_addr->sin_zero, 0, sizeof(in4_addr->sin_zero));
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
	memset((char *)saddr + *addr_len, 0, sizeof(struct sockaddr_storage) - *addr_len);
}
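
/*
 * Example: for an IPv4 local address and port 21064 (the default DLM
 * TCP port), make_sockaddr() sets sin_port = cpu_to_be16(21064),
 * returns *addr_len = sizeof(struct sockaddr_in), and zeroes the
 * remainder of the sockaddr_storage beyond that length.
 */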

/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other,
			     bool tx, bool rx)
{
	bool closing = test_and_set_bit(CF_CLOSING, &con->flags);

	if (tx && !closing && cancel_work_sync(&con->swork))
		log_print("canceled swork for node %d", con->nodeid);
	if (rx && !closing && cancel_work_sync(&con->rwork))
		log_print("canceled rwork for node %d", con->nodeid);

	mutex_lock(&con->sock_mutex);
	if (con->sock) {
		restore_callbacks(con->sock);
		sock_release(con->sock);
		con->sock = NULL;
	}
	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false, true, true);
	}
	if (con->rx_page) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	con->retries = 0;
	mutex_unlock(&con->sock_mutex);
	clear_bit(CF_CLOSING, &con->flags);
}
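
/*
 * CF_CLOSING ensures only the first closer of a connection runs
 * cancel_work_sync(); an overlapping close, including one issued from
 * inside the send/receive work items themselves, skips the cancel and
 * so cannot deadlock waiting for its own work item to finish.
 */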

/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	int ret = 0;
	struct msghdr msg = {};
	struct kvec iov[2];
	unsigned len;
	int r;
	int call_again_soon = 0;
	int nvec;

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}
	if (con->nodeid == 0) {
		ret = -EINVAL;
		goto out_close;
	}

	if (con->rx_page == NULL) {
		/*
		 * This doesn't need to be atomic, but I think it should
		 * improve performance if it is.
		 */
		con->rx_page = alloc_page(GFP_ATOMIC);
		if (con->rx_page == NULL)
			goto out_resched;
		cbuf_init(&con->cb, PAGE_SIZE);
	}

	/*
	 * iov[0] is the bit of the circular buffer between the current end
	 * point (cb.base + cb.len) and the end of the buffer.
	 */
	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
	iov[1].iov_len = 0;
	nvec = 1;

	/*
	 * iov[1] is the bit of the circular buffer between the start of the
	 * buffer and the start of the currently used section (cb.base)
	 */
	if (cbuf_data(&con->cb) >= con->cb.base) {
		iov[0].iov_len = PAGE_SIZE - cbuf_data(&con->cb);
		iov[1].iov_len = con->cb.base;
		iov[1].iov_base = page_address(con->rx_page);
		nvec = 2;
	}
	len = iov[0].iov_len + iov[1].iov_len;

	r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
				 MSG_DONTWAIT | MSG_NOSIGNAL);
	if (ret <= 0)
		goto out_close;
	else if (ret == len)
		call_again_soon = 1;

	cbuf_add(&con->cb, ret);
	ret = dlm_process_incoming_buffer(con->nodeid,
					  page_address(con->rx_page),
					  con->cb.base, con->cb.len,
					  PAGE_SIZE);
	if (ret == -EBADMSG) {
		log_print("lowcomms: addr=%p, base=%u, len=%u, read=%d",
			  page_address(con->rx_page), con->cb.base,
			  con->cb.len, r);
	}
	if (ret < 0)
		goto out_close;
	cbuf_eat(&con->cb, ret);

	if (cbuf_empty(&con->cb) && !call_again_soon) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	if (call_again_soon)
		goto out_resched;
	mutex_unlock(&con->sock_mutex);
	return 0;

out_resched:
	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	mutex_unlock(&con->sock_mutex);
	return -EAGAIN;

out_close:
	mutex_unlock(&con->sock_mutex);
	if (ret != -EAGAIN) {
		close_connection(con, true, true, false);
		/* Reconnect when there is something to send */
	}
	/* Don't return success if we really got EOF */
	if (ret == 0)
		ret = -EAGAIN;

	return ret;
}

/* Listening socket is busy, accept a connection */
static int tcp_accept_from_sock(struct connection *con)
{
	int result;
	struct sockaddr_storage peeraddr;
	struct socket *newsock;
	int len;
	int nodeid;
	struct connection *newcon;
	struct connection *addcon;

	mutex_lock(&connections_lock);
	if (!dlm_allow_conn) {
		mutex_unlock(&connections_lock);
		return -1;
	}
	mutex_unlock(&connections_lock);

	mutex_lock_nested(&con->sock_mutex, 0);

	if (!con->sock) {
		mutex_unlock(&con->sock_mutex);
		return -ENOTCONN;
	}

	result = kernel_accept(con->sock, &newsock, O_NONBLOCK);
	if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
				  &len, 2)) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (addr_to_nodeid(&peeraddr, &nodeid)) {
		unsigned char *b = (unsigned char *)&peeraddr;
		log_print("connect from non cluster node");
		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
				     b, sizeof(struct sockaddr_storage));
		sock_release(newsock);
		mutex_unlock(&con->sock_mutex);
		return -1;
	}

	log_print("got connection from %d", nodeid);

	/* Check to see if we already have a connection to this node. This
	 * could happen if the two nodes initiate a connection at roughly
	 * the same time and the connections cross on the wire.
	 * In this case we store the incoming one in "othercon"
	 */
	newcon = nodeid2con(nodeid, GFP_NOFS);
	if (!newcon) {
		result = -ENOMEM;
		goto accept_err;
	}
	mutex_lock_nested(&newcon->sock_mutex, 1);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				result = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
		}
		mutex_lock_nested(&othercon->sock_mutex, 2);
		if (!othercon->sock) {
			newcon->othercon = othercon;
			add_sock(newsock, othercon);
			addcon = othercon;
			mutex_unlock(&othercon->sock_mutex);
		} else {
			printk("Extra connection from node %d attempted\n", nodeid);
			result = -EAGAIN;
			mutex_unlock(&othercon->sock_mutex);
			mutex_unlock(&newcon->sock_mutex);
			goto accept_err;
		}
	} else {
		newcon->rx_action = receive_from_sock;
		/* accept copies the sk after we've saved the callbacks, so we
		   don't want to save them a second time or comm errors will
		   result in calling sk_error_report recursively. */
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data between
	 * processing the accept and adding the socket to the
	 * read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	if (newsock)
		sock_release(newsock);

	if (result != -EAGAIN)
		log_print("error accepting connection from node: %d", result);
	return result;
}

static int sctp_accept_from_sock(struct connection *con)
{
	/* Check that the new node is in the lockspace */
	struct sctp_prim prim;
	int nodeid;
	int prim_len, ret;
	int addr_len;
	struct connection *newcon;
	struct connection *addcon;
	struct socket *newsock;

	mutex_lock(&connections_lock);
	if (!dlm_allow_conn) {
		mutex_unlock(&connections_lock);
		return -1;
	}
	mutex_unlock(&connections_lock);

	mutex_lock_nested(&con->sock_mutex, 0);

	ret = kernel_accept(con->sock, &newsock, O_NONBLOCK);
	if (ret < 0)
		goto accept_err;

	memset(&prim, 0, sizeof(struct sctp_prim));
	prim_len = sizeof(struct sctp_prim);

	ret = kernel_getsockopt(newsock, IPPROTO_SCTP, SCTP_PRIMARY_ADDR,
				(char *)&prim, &prim_len);
	if (ret < 0) {
		log_print("getsockopt/sctp_primary_addr failed: %d", ret);
		goto accept_err;
	}

	make_sockaddr(&prim.ssp_addr, 0, &addr_len);
	ret = addr_to_nodeid(&prim.ssp_addr, &nodeid);
	if (ret) {
		unsigned char *b = (unsigned char *)&prim.ssp_addr;

		log_print("reject connect from unknown addr");
		print_hex_dump_bytes("ss: ", DUMP_PREFIX_NONE,
				     b, sizeof(struct sockaddr_storage));
		goto accept_err;
	}

	newcon = nodeid2con(nodeid, GFP_NOFS);
	if (!newcon) {
		ret = -ENOMEM;
		goto accept_err;
	}

	mutex_lock_nested(&newcon->sock_mutex, 1);

	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kmem_cache_zalloc(con_cache, GFP_NOFS);
			if (!othercon) {
				log_print("failed to allocate incoming socket");
				mutex_unlock(&newcon->sock_mutex);
				ret = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
		}
		mutex_lock_nested(&othercon->sock_mutex, 2);
		if (!othercon->sock) {
			newcon->othercon = othercon;
			add_sock(newsock, othercon);
			addcon = othercon;
			mutex_unlock(&othercon->sock_mutex);
		} else {
			printk("Extra connection from node %d attempted\n", nodeid);
			ret = -EAGAIN;
			mutex_unlock(&othercon->sock_mutex);
			mutex_unlock(&newcon->sock_mutex);
			goto accept_err;
		}
	} else {
		newcon->rx_action = receive_from_sock;
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	log_print("connected to %d", nodeid);

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data between
	 * processing the accept and adding the socket to the
	 * read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	if (newsock)
		sock_release(newsock);
	if (ret != -EAGAIN)
		log_print("error accepting connection from node: %d", ret);

	return ret;
}

static void free_entry(struct writequeue_entry *e)
{
	__free_page(e->page);
	kfree(e);
}

/*
 * writequeue_entry_complete - try to delete and free write queue entry
 * @e: write queue entry to try to delete
 * @completed: bytes completed
 *
 * writequeue_lock must be held.
 */
static void writequeue_entry_complete(struct writequeue_entry *e, int completed)
{
	e->offset += completed;
	e->len -= completed;

	if (e->len == 0 && e->users == 0) {
		list_del(&e->list);
		free_entry(e);
	}
}
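
/*
 * 'users' counts callers that have reserved space in an entry via
 * dlm_lowcomms_get_buffer() but not yet committed it; an entry is only
 * freed once every reservation has been committed (users == 0) and all
 * of its bytes have been transmitted (len == 0).
 */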

/*
 * sctp_bind_addrs - bind a SCTP socket to all our addresses
 */
static int sctp_bind_addrs(struct connection *con, uint16_t port)
{
	struct sockaddr_storage localaddr;
	int i, addr_len, result = 0;

	for (i = 0; i < dlm_local_count; i++) {
		memcpy(&localaddr, dlm_local_addr[i], sizeof(localaddr));
		make_sockaddr(&localaddr, port, &addr_len);

		if (!i)
			result = kernel_bind(con->sock,
					     (struct sockaddr *)&localaddr,
					     addr_len);
		else
			result = kernel_setsockopt(con->sock, SOL_SCTP,
						   SCTP_SOCKOPT_BINDX_ADD,
						   (char *)&localaddr, addr_len);

		if (result < 0) {
			log_print("Can't bind to %d addr number %d, %d.\n",
				  port, i + 1, result);
			break;
		}
	}
	return result;
}
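
/*
 * The first local address goes on with a normal bind(); each further
 * address is attached to the same socket with SCTP_SOCKOPT_BINDX_ADD,
 * which is how an SCTP socket is made multi-homed.
 */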

/* Initiate an SCTP association.
   Unlike the TCP case, each association gets its own socket here: a
   dedicated SOCK_STREAM/IPPROTO_SCTP socket is created, bound to all
   our local addresses and then connected to the primary address of the
   remote node.
 */
static void sctp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage daddr;
	int one = 1;
	int result;
	int addr_len;
	struct socket *sock;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);

	/* Some odd races can cause double-connects, ignore them */
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	if (con->sock) {
		log_print("node %d already connected.", con->nodeid);
		goto out;
	}

	memset(&daddr, 0, sizeof(daddr));
	result = nodeid_to_addr(con->nodeid, &daddr, NULL, true);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out;
	}

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_SCTP, &sock);
	if (result < 0)
		goto socket_err;

	con->rx_action = receive_from_sock;
	con->connect_action = sctp_connect_to_sock;
	add_sock(sock, con);

	/* Bind to all addresses. */
	if (sctp_bind_addrs(con, 0))
		goto bind_err;

	make_sockaddr(&daddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
			  sizeof(one));

	result = sock->ops->connect(sock, (struct sockaddr *)&daddr, addr_len,
				    O_NONBLOCK);
	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

bind_err:
	con->sock = NULL;
	sock_release(sock);

socket_err:
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}

out:
	mutex_unlock(&con->sock_mutex);
}

/* Connect a new socket to its peer */
static void tcp_connect_to_sock(struct connection *con)
{
	struct sockaddr_storage saddr, src_addr;
	int addr_len;
	struct socket *sock = NULL;
	int one = 1;
	int result;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	/* Some odd races can cause double-connects, ignore them */
	if (con->sock)
		goto out;

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0)
		goto out_err;

	memset(&saddr, 0, sizeof(saddr));
	result = nodeid_to_addr(con->nodeid, &saddr, NULL, false);
	if (result < 0) {
		log_print("no address for nodeid %d", con->nodeid);
		goto out_err;
	}

	con->rx_action = receive_from_sock;
	con->connect_action = tcp_connect_to_sock;
	add_sock(sock, con);

	/* Bind to our cluster-known address before connecting, to avoid
	   routing problems */
	memcpy(&src_addr, dlm_local_addr[0], sizeof(src_addr));
	make_sockaddr(&src_addr, 0, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) &src_addr,
				 addr_len);
	if (result < 0) {
		log_print("could not bind for connect: %d", result);
		/* This *may* not indicate a critical error */
	}

	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

	log_print("connecting to %d", con->nodeid);

	/* Turn off Nagle's algorithm */
	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
			  sizeof(one));

	result = sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
				    O_NONBLOCK);
	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

out_err:
	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	} else if (sock) {
		sock_release(sock);
	}
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH &&
	    result != -ENETUNREACH &&
	    result != -ENETDOWN &&
	    result != -EINVAL &&
	    result != -EPROTONOSUPPORT) {
		log_print("connect %d try %d error %d", con->nodeid,
			  con->retries, result);
		mutex_unlock(&con->sock_mutex);
		msleep(1000);
		lowcomms_connect_sock(con);
		return;
	}
out:
	mutex_unlock(&con->sock_mutex);
	return;
}
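
/*
 * For transient connect failures both connect paths above sleep for a
 * second and requeue the send work, retrying until the double-connect
 * guard (con->retries > MAX_CONNECT_RETRIES) puts a stop to it.
 */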

static struct socket *tcp_create_listen_sock(struct connection *con,
					     struct sockaddr_storage *saddr)
{
	struct socket *sock = NULL;
	int result = 0;
	int one = 1;
	int addr_len;

	if (dlm_local_addr[0]->ss_family == AF_INET)
		addr_len = sizeof(struct sockaddr_in);
	else
		addr_len = sizeof(struct sockaddr_in6);

	/* Create a socket to communicate with */
	result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
				  SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0) {
		log_print("Can't create listening comms socket");
		goto create_out;
	}

	/* Turn off Nagle's algorithm */
	kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY, (char *)&one,
			  sizeof(one));

	result = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
				   (char *)&one, sizeof(one));

	if (result < 0) {
		log_print("Failed to set SO_REUSEADDR on socket: %d", result);
	}
	write_lock_bh(&sock->sk->sk_callback_lock);
	sock->sk->sk_user_data = con;
	save_listen_callbacks(sock);
	con->rx_action = tcp_accept_from_sock;
	con->connect_action = tcp_connect_to_sock;
	write_unlock_bh(&sock->sk->sk_callback_lock);

	/* Bind to our port */
	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
	if (result < 0) {
		log_print("Can't bind to port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		con->sock = NULL;
		goto create_out;
	}
	result = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				   (char *)&one, sizeof(one));
	if (result < 0) {
		log_print("Set keepalive failed: %d", result);
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		log_print("Can't listen on port %d", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		goto create_out;
	}

create_out:
	return sock;
}

/* Get local addresses */
static void init_local(void)
{
	struct sockaddr_storage sas, *addr;
	int i;

	dlm_local_count = 0;
	for (i = 0; i < DLM_MAX_ADDR_COUNT; i++) {
		if (dlm_our_addr(&sas, i))
			break;

		addr = kmemdup(&sas, sizeof(*addr), GFP_NOFS);
		if (!addr)
			break;
		dlm_local_addr[dlm_local_count++] = addr;
	}
}

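/*
 * dlm_local_addr[] now holds every cluster address configured for this
 * node (up to DLM_MAX_ADDR_COUNT).  TCP only ever uses entry 0, while
 * SCTP binds to all of them for multi-homing.
 */
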
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001289/* Initialise SCTP socket and bind to all interfaces */
1290static int sctp_listen_for_all(void)
1291{
1292 struct socket *sock = NULL;
Marcelo Ricardo Leitneree44b4b2015-08-11 19:22:23 -03001293 int result = -EINVAL;
David Teigland573c24c2009-11-30 16:34:43 -06001294 struct connection *con = nodeid2con(0, GFP_NOFS);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001295 int bufsize = NEEDED_RMEM;
Mike Christie86e92ad2013-06-14 04:56:14 -05001296 int one = 1;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001297
1298 if (!con)
1299 return -ENOMEM;
1300
1301 log_print("Using SCTP for communications");
1302
Eric W. Biedermaneeb1bd52015-05-08 21:08:05 -05001303 result = sock_create_kern(&init_net, dlm_local_addr[0]->ss_family,
Marcelo Ricardo Leitneree44b4b2015-08-11 19:22:23 -03001304 SOCK_STREAM, IPPROTO_SCTP, &sock);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001305 if (result < 0) {
1306 log_print("Can't create comms socket, check SCTP is loaded");
1307 goto out;
1308 }
1309
David S. Millerdf61c952007-11-06 23:48:57 -08001310 result = kernel_setsockopt(sock, SOL_SOCKET, SO_RCVBUFFORCE,
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001311 (char *)&bufsize, sizeof(bufsize));
1312 if (result)
David Teigland617e82e2007-04-26 13:46:49 -05001313 log_print("Error increasing buffer space on socket %d", result);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001314
Mike Christie86e92ad2013-06-14 04:56:14 -05001315 result = kernel_setsockopt(sock, SOL_SCTP, SCTP_NODELAY, (char *)&one,
1316 sizeof(one));
1317 if (result < 0)
1318 log_print("Could not set SCTP NODELAY error %d\n", result);
1319
Bob Petersonb81171c2016-02-05 14:39:02 -05001320 write_lock_bh(&sock->sk->sk_callback_lock);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001321 /* Init con struct */
1322 sock->sk->sk_user_data = con;
Bob Petersoncc661fc2017-09-12 08:55:23 +00001323 save_listen_callbacks(sock);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001324 con->sock = sock;
1325 con->sock->sk->sk_data_ready = lowcomms_data_ready;
Marcelo Ricardo Leitneree44b4b2015-08-11 19:22:23 -03001326 con->rx_action = sctp_accept_from_sock;
1327 con->connect_action = sctp_connect_to_sock;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001328
Bob Petersonb81171c2016-02-05 14:39:02 -05001329 write_unlock_bh(&sock->sk->sk_callback_lock);
1330
Marcelo Ricardo Leitneree44b4b2015-08-11 19:22:23 -03001331 /* Bind to all addresses. */
1332 if (sctp_bind_addrs(con, dlm_config.ci_tcp_port))
1333 goto create_delsock;
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001334
1335 result = sock->ops->listen(sock, 5);
1336 if (result < 0) {
1337 log_print("Can't set socket listening");
1338 goto create_delsock;
1339 }
1340
1341 return 0;
1342
1343create_delsock:
1344 sock_release(sock);
1345 con->sock = NULL;
1346out:
1347 return result;
1348}
1349
1350static int tcp_listen_for_all(void)
1351{
1352 struct socket *sock = NULL;
David Teigland573c24c2009-11-30 16:34:43 -06001353 struct connection *con = nodeid2con(0, GFP_NOFS);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001354 int result = -EINVAL;
1355
1356 if (!con)
1357 return -ENOMEM;
1358
1359 /* We don't support multi-homed hosts */
1360 if (dlm_local_addr[1] != NULL) {
David Teigland617e82e2007-04-26 13:46:49 -05001361 log_print("TCP protocol can't handle multi-homed hosts, "
1362 "try SCTP");
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001363 return -EINVAL;
1364 }
1365
1366 log_print("Using TCP for communications");
1367
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001368 sock = tcp_create_listen_sock(con, dlm_local_addr[0]);
1369 if (sock) {
tsutomu.owa@toshiba.co.jp988419a2017-09-12 08:55:32 +00001370 add_sock(sock, con);
Patrick Caulfield6ed7257b2007-04-17 15:39:57 +01001371 result = 0;
1372 }
1373 else {
1374 result = -EADDRINUSE;
1375 }
1376
1377 return result;
1378}
1379
1380
1381
static struct writequeue_entry *new_writequeue_entry(struct connection *con,
						     gfp_t allocation)
{
	struct writequeue_entry *entry;

	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
	if (!entry)
		return NULL;

	entry->page = alloc_page(allocation);
	if (!entry->page) {
		kfree(entry);
		return NULL;
	}

	entry->offset = 0;
	entry->len = 0;
	entry->end = 0;
	entry->users = 0;
	entry->con = con;

	return entry;
}

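/*
 * Reserve 'len' bytes in the tail page of the node's write queue, or
 * start a new page if the current one cannot hold the message. Returns
 * an opaque handle (the writequeue entry) and sets *ppc to the space
 * the caller should fill.
 *
 * A minimal usage sketch; the caller-side names (msg, msg_len) are
 * illustrative, not part of this file:
 *
 *	char *buf;
 *	void *mh = dlm_lowcomms_get_buffer(nodeid, msg_len, GFP_NOFS, &buf);
 *	if (!mh)
 *		return -ENOMEM;
 *	memcpy(buf, msg, msg_len);	 (fill the reserved space)
 *	dlm_lowcomms_commit_buffer(mh);	 (queue it for the send worker)
 */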
void *dlm_lowcomms_get_buffer(int nodeid, int len, gfp_t allocation, char **ppc)
{
	struct connection *con;
	struct writequeue_entry *e;
	int offset = 0;

	con = nodeid2con(nodeid, allocation);
	if (!con)
		return NULL;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
	if ((&e->list == &con->writequeue) ||
	    (PAGE_SIZE - e->end < len)) {
		e = NULL;
	} else {
		offset = e->end;
		e->end += len;
		e->users++;
	}
	spin_unlock(&con->writequeue_lock);

	if (e) {
	got_one:
		*ppc = page_address(e->page) + offset;
		return e;
	}

	e = new_writequeue_entry(con, allocation);
	if (e) {
		spin_lock(&con->writequeue_lock);
		offset = e->end;
		e->end += len;
		e->users++;
		list_add_tail(&e->list, &con->writequeue);
		spin_unlock(&con->writequeue_lock);
		goto got_one;
	}
	return NULL;
}

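/*
 * Mark a buffer obtained from dlm_lowcomms_get_buffer() as ready to
 * send. When the last user of the entry commits, its final length is
 * fixed and the send worker is kicked.
 */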
void dlm_lowcomms_commit_buffer(void *mh)
{
	struct writequeue_entry *e = (struct writequeue_entry *)mh;
	struct connection *con = e->con;
	int users;

	spin_lock(&con->writequeue_lock);
	users = --e->users;
	if (users)
		goto out;
	e->len = e->end - e->offset;
	spin_unlock(&con->writequeue_lock);

	queue_work(send_workqueue, &con->swork);
	return;

out:
	spin_unlock(&con->writequeue_lock);
	return;
}

/* Send as much of the write queue as the socket will take. Runs from the
   send workqueue; on -EAGAIN the work is requeued rather than blocking. */
static void send_to_sock(struct connection *con)
{
	int ret = 0;
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset;
	int count = 0;

	mutex_lock(&con->sock_mutex);
	if (con->sock == NULL)
		goto out_connect;

	spin_lock(&con->writequeue_lock);
	for (;;) {
		e = list_entry(con->writequeue.next, struct writequeue_entry,
			       list);
		if ((struct list_head *) e == &con->writequeue)
			break;

		len = e->len;
		offset = e->offset;
		BUG_ON(len == 0 && e->users == 0);
		spin_unlock(&con->writequeue_lock);

		ret = 0;
		if (len) {
			ret = kernel_sendpage(con->sock, e->page, offset, len,
					      msg_flags);
			if (ret == -EAGAIN || ret == 0) {
				if (ret == -EAGAIN &&
				    test_bit(SOCKWQ_ASYNC_NOSPACE, &con->sock->flags) &&
				    !test_and_set_bit(CF_APP_LIMITED, &con->flags)) {
					/* Notify TCP that we're limited by the
					 * application window size.
					 */
					set_bit(SOCK_NOSPACE, &con->sock->flags);
					con->sock->sk->sk_write_pending++;
				}
				cond_resched();
				goto out;
			} else if (ret < 0)
				goto send_error;
		}

		/* Don't starve people filling buffers */
		if (++count >= MAX_SEND_MSG_COUNT) {
			cond_resched();
			count = 0;
		}

		spin_lock(&con->writequeue_lock);
		writequeue_entry_complete(e, ret);
	}
	spin_unlock(&con->writequeue_lock);
out:
	mutex_unlock(&con->sock_mutex);
	return;

send_error:
	mutex_unlock(&con->sock_mutex);
	close_connection(con, true, false, true);
	/* Requeue the send work. When the work daemon runs again, it will try
	   a new connection, then call this function again. */
	queue_work(send_workqueue, &con->swork);
	return;

out_connect:
	mutex_unlock(&con->sock_mutex);
	queue_work(send_workqueue, &con->swork);
	cond_resched();
}

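/* Free every pending entry on one connection's write queue. */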
static void clean_one_writequeue(struct connection *con)
{
	struct writequeue_entry *e, *safe;

	spin_lock(&con->writequeue_lock);
	list_for_each_entry_safe(e, safe, &con->writequeue, list) {
		list_del(&e->list);
		free_entry(e);
	}
	spin_unlock(&con->writequeue_lock);
}

/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
	struct connection *con;
	struct dlm_node_addr *na;

	log_print("closing connection to node %d", nodeid);
	con = nodeid2con(nodeid, 0);
	if (con) {
		set_bit(CF_CLOSE, &con->flags);
		close_connection(con, true, true, true);
		clean_one_writequeue(con);
	}

	spin_lock(&dlm_node_addrs_spin);
	na = find_node_addr(nodeid);
	if (na) {
		list_del(&na->list);
		while (na->addr_count--)
			kfree(na->addr[na->addr_count]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);

	return 0;
}

/* Receive workqueue function */
static void process_recv_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, rwork);
	int err;

	clear_bit(CF_READ_PENDING, &con->flags);
	do {
		err = con->rx_action(con);
	} while (!err);
}

/* Send workqueue function */
static void process_send_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, swork);

	clear_bit(CF_WRITE_PENDING, &con->flags);
	if (con->sock == NULL) /* not mutex protected so check it inside too */
		con->connect_action(con);
	if (!list_empty(&con->writequeue))
		send_to_sock(con);
}


/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
	foreach_conn(clean_one_writequeue);
}

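/* Destroy the send and receive workqueues. */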
static void work_stop(void)
{
	destroy_workqueue(recv_workqueue);
	destroy_workqueue(send_workqueue);
}

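/*
 * Create the single-threaded, unbound send and receive workqueues.
 * WQ_MEM_RECLAIM provides a rescuer thread so the queues keep making
 * progress under memory pressure.
 */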
static int work_start(void)
{
	recv_workqueue = alloc_workqueue("dlm_recv",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!recv_workqueue) {
		log_print("can't start dlm_recv");
		return -ENOMEM;
	}

	send_workqueue = alloc_workqueue("dlm_send",
					 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!send_workqueue) {
		log_print("can't start dlm_send");
		destroy_workqueue(recv_workqueue);
		return -ENOMEM;
	}

	return 0;
}

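/*
 * Quiesce a connection: mark it closed, set the pending bits (the
 * workers clear these, which lets work_flush() detect whether any work
 * has run since), and detach sk_user_data so the socket callbacks stop
 * touching the connection. 'and_other' also stops the paired othercon.
 */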
static void _stop_conn(struct connection *con, bool and_other)
{
	mutex_lock(&con->sock_mutex);
	set_bit(CF_CLOSE, &con->flags);
	set_bit(CF_READ_PENDING, &con->flags);
	set_bit(CF_WRITE_PENDING, &con->flags);
	if (con->sock && con->sock->sk) {
		write_lock_bh(&con->sock->sk->sk_callback_lock);
		con->sock->sk->sk_user_data = NULL;
		write_unlock_bh(&con->sock->sk->sk_callback_lock);
	}
	if (con->othercon && and_other)
		_stop_conn(con->othercon, false);
	mutex_unlock(&con->sock_mutex);
}

static void stop_conn(struct connection *con)
{
	_stop_conn(con, true);
}

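/* Final teardown of one connection: close it, then free it and its
   othercon. */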
static void free_conn(struct connection *con)
{
	close_connection(con, true, true, true);
	if (con->othercon)
		kmem_cache_free(con_cache, con->othercon);
	hlist_del(&con->list);
	kmem_cache_free(con_cache, con);
}

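/*
 * Flush both workqueues until no connection has had work run since the
 * last pass: stop_conn() sets the pending bits and the workers clear
 * them, so a cleared bit means more work executed and we loop again.
 */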
static void work_flush(void)
{
	int ok;
	int i;
	struct hlist_node *n;
	struct connection *con;

	flush_workqueue(recv_workqueue);
	flush_workqueue(send_workqueue);
	do {
		ok = 1;
		foreach_conn(stop_conn);
		flush_workqueue(recv_workqueue);
		flush_workqueue(send_workqueue);
		for (i = 0; i < CONN_HASH_SIZE && ok; i++) {
			hlist_for_each_entry_safe(con, n,
						  &connection_hash[i], list) {
				ok &= test_bit(CF_READ_PENDING, &con->flags);
				ok &= test_bit(CF_WRITE_PENDING, &con->flags);
				if (con->othercon) {
					ok &= test_bit(CF_READ_PENDING,
						       &con->othercon->flags);
					ok &= test_bit(CF_WRITE_PENDING,
						       &con->othercon->flags);
				}
			}
		}
	} while (!ok);
}

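/*
 * Shut the comms layer down: refuse new connections, drain all pending
 * work, free every connection and its queued messages, then stop the
 * workqueues and destroy the connection cache.
 */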
void dlm_lowcomms_stop(void)
{
	/* Set all the flags to prevent any
	   socket activity.
	*/
	mutex_lock(&connections_lock);
	dlm_allow_conn = 0;
	mutex_unlock(&connections_lock);
	work_flush();
	clean_writequeues();
	foreach_conn(free_conn);
	work_stop();

	kmem_cache_destroy(con_cache);
}

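/*
 * Bring the comms layer up: resolve the local addresses, create the
 * connection cache and workqueues, then open the listening socket for
 * the configured protocol (0 = TCP, otherwise SCTP).
 */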
int dlm_lowcomms_start(void)
{
	int error = -EINVAL;
	struct connection *con;
	int i;

	for (i = 0; i < CONN_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&connection_hash[i]);

	init_local();
	if (!dlm_local_count) {
		error = -ENOTCONN;
		log_print("no local IP address has been set");
		goto fail;
	}

	error = -ENOMEM;
	con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
				      __alignof__(struct connection), 0,
				      NULL);
	if (!con_cache)
		goto fail;

	error = work_start();
	if (error)
		goto fail_destroy;

	dlm_allow_conn = 1;

	/* Start listening */
	if (dlm_config.ci_protocol == 0)
		error = tcp_listen_for_all();
	else
		error = sctp_listen_for_all();
	if (error)
		goto fail_unlisten;

	return 0;

fail_unlisten:
	dlm_allow_conn = 0;
	con = nodeid2con(0, 0);
	if (con) {
		close_connection(con, false, true, true);
		kmem_cache_free(con_cache, con);
	}
fail_destroy:
	kmem_cache_destroy(con_cache);
fail:
	return error;
}

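/* Final cleanup: free the cached node address list. */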
void dlm_lowcomms_exit(void)
{
	struct dlm_node_addr *na, *safe;

	spin_lock(&dlm_node_addrs_spin);
	list_for_each_entry_safe(na, safe, &dlm_node_addrs, list) {
		list_del(&na->list);
		while (na->addr_count--)
			kfree(na->addr[na->addr_count]);
		kfree(na);
	}
	spin_unlock(&dlm_node_addrs_spin);
}