/* Local endpoint object management
 *
 * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <linux/hashtable.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

static void rxrpc_local_processor(struct work_struct *);
static void rxrpc_local_rcu(struct rcu_head *);

/*
 * Compare a local to an address.  Return -ve, 0 or +ve to indicate less than,
 * same or greater than.
 *
 * We explicitly don't compare the RxRPC service ID as we want to reject
 * conflicting uses by differing services.  Further, we don't want to share
 * addresses with different options (IPv6), so we don't compare those bits
 * either.
 */
static long rxrpc_local_cmp_key(const struct rxrpc_local *local,
				const struct sockaddr_rxrpc *srx)
{
	long diff;

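	/* The GNU "a ?: b" extension used below yields a if a is non-zero and
	 * b otherwise, so the chain returns the first non-zero difference.
	 */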
	diff = ((local->srx.transport_type - srx->transport_type) ?:
		(local->srx.transport_len - srx->transport_len) ?:
		(local->srx.transport.family - srx->transport.family));
	if (diff != 0)
		return diff;

	switch (srx->transport.family) {
	case AF_INET:
		/* If the choice of UDP port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin.sin_port -
			(u16 __force)srx->transport.sin.sin_port) ?:
			memcmp(&local->srx.transport.sin.sin_addr,
			       &srx->transport.sin.sin_addr,
			       sizeof(struct in_addr));
#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		/* If the choice of UDP6 port is left up to the transport, then
		 * the endpoint record doesn't match.
		 */
		return ((u16 __force)local->srx.transport.sin6.sin6_port -
			(u16 __force)srx->transport.sin6.sin6_port) ?:
			memcmp(&local->srx.transport.sin6.sin6_addr,
			       &srx->transport.sin6.sin6_addr,
			       sizeof(struct in6_addr));
#endif
	default:
		BUG();
	}
}

/*
 * Allocate a new local endpoint.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
					     const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		atomic_set(&local->usage, 1);
		local->rxnet = rxnet;
		INIT_LIST_HEAD(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_conns = RB_ROOT;
		spin_lock_init(&local->client_conns_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
		local->srx.srx_service = 0;
		trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
	}

	_leave(" = %p", local);
	return local;
}

/*
 * create the local socket
 * - must be called with the per-net rxrpc_net::local_mutex locked
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
	struct sock *sock;
	int ret, opt;

	_enter("%p{%d,%d}",
	       local, local->srx.transport_type, local->srx.transport.family);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(net, local->srx.transport.family,
			       local->srx.transport_type, 0, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	switch (local->srx.transport.family) {
	case AF_INET:
		/* we want to receive ICMP errors */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* we want to set the don't fragment bit */
		opt = IP_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}
		break;

	case AF_INET6:
		/* we want to receive ICMP errors */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* we want to set the don't fragment bit */
		opt = IPV6_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}
		break;

	default:
		BUG();
	}

	/* set the socket up */
	sock = local->socket->sk;
	sock->sk_user_data = local;
	sock->sk_data_ready = rxrpc_data_ready;
	sock->sk_error_report = rxrpc_error_report;
	_leave(" = 0");
	return 0;

error:
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}

/*
 * Look up or create a new local endpoint using the specified local address.
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

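	/* The per-net endpoint list is kept sorted by rxrpc_local_cmp_key(),
	 * so the walk can stop at the first entry that compares greater;
	 * cursor then marks the insertion point for a new endpoint.
	 */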
	for (cursor = rxnet->local_endpoints.next;
	     cursor != &rxnet->local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We replace a dying object.  Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 */
		if (!rxrpc_get_local_maybe(local)) {
			cursor = cursor->next;
			list_del_init(&local->link);
			break;
		}

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(rxnet, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

	list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxnet->local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	kfree(local);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}

/*
 * Get a ref on a local endpoint.
 */
struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	n = atomic_inc_return(&local->usage);
	trace_rxrpc_local(local, rxrpc_local_got, n, here);
	return local;
}

/*
 * Get a ref on a local endpoint unless its usage has already reached 0.
 */
struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

	if (local) {
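		/* __atomic_add_unless() returns the old counter value and only
		 * increments it if it wasn't already 0, so a result of 0 means
		 * the endpoint is already on its way to destruction.
		 */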
		int n = __atomic_add_unless(&local->usage, 1, 0);
		if (n > 0)
			trace_rxrpc_local(local, rxrpc_local_got, n + 1, here);
		else
			local = NULL;
	}
	return local;
}

/*
 * Queue a local endpoint.
 */
void rxrpc_queue_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);

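	/* The work item is only queued - and the trace line only emitted - if
	 * it isn't already pending on the workqueue.
	 */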
	if (rxrpc_queue_work(&local->processor))
		trace_rxrpc_local(local, rxrpc_local_queued,
				  atomic_read(&local->usage), here);
}

/*
 * A local endpoint reached its end of life.
 */
static void __rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%d", local->debug_id);
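	/* Destruction must happen in process context, so defer it to the work
	 * item; rxrpc_local_processor() will see usage == 0 and call
	 * rxrpc_local_destroyer().
	 */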
	rxrpc_queue_work(&local->processor);
}

/*
 * Drop a ref on a local endpoint.
 */
void rxrpc_put_local(struct rxrpc_local *local)
{
	const void *here = __builtin_return_address(0);
	int n;

	if (local) {
		n = atomic_dec_return(&local->usage);
		trace_rxrpc_local(local, rxrpc_local_put, n, here);

		if (n == 0)
			__rxrpc_put_local(local);
	}
}

/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

	/* We can get a race between an incoming call packet queueing the
	 * processor again and the work processor starting the destruction
	 * process which will shut down the UDP socket.
	 */
	if (local->dead) {
		_leave(" [already dead]");
		return;
	}
	local->dead = true;

	mutex_lock(&rxnet->local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxnet->local_mutex);

	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(!local->service);

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);

	_debug("rcu local %d", local->debug_id);
	call_rcu(&local->rcu, rxrpc_local_rcu);
}

/*
 * Process events on an endpoint
 */
static void rxrpc_local_processor(struct work_struct *work)
{
	struct rxrpc_local *local =
		container_of(work, struct rxrpc_local, processor);
	bool again;

	trace_rxrpc_local(local, rxrpc_local_processing,
			  atomic_read(&local->usage), NULL);

	do {
		again = false;
		if (atomic_read(&local->usage) == 0)
			return rxrpc_local_destroyer(local);

		if (!skb_queue_empty(&local->reject_queue)) {
			rxrpc_reject_packets(local);
			again = true;
		}

		if (!skb_queue_empty(&local->event_queue)) {
			rxrpc_process_local_events(local);
			again = true;
		}
	} while (again);
}

/*
 * Destroy a local endpoint after the RCU grace period expires.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}

/*
 * Verify the local endpoint list is empty by this point.
 */
void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet)
{
	struct rxrpc_local *local;

	_enter("");

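	/* Wait for any outstanding local processor work - including deferred
	 * endpoint destruction - to finish before checking for leaks.
	 */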
	flush_workqueue(rxrpc_workqueue);

	if (!list_empty(&rxnet->local_endpoints)) {
		mutex_lock(&rxnet->local_mutex);
		list_for_each_entry(local, &rxnet->local_endpoints, link) {
			pr_err("AF_RXRPC: Leaked local %p {%d}\n",
			       local, atomic_read(&local->usage));
		}
		mutex_unlock(&rxnet->local_mutex);
		BUG();
	}
}