David Howells | 8756361 | 2016-04-04 14:00:34 +0100 | [diff] [blame] | 1 | /* Local endpoint object management |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 2 | * |
David Howells | 4f95dd7 | 2016-04-04 14:00:35 +0100 | [diff] [blame] | 3 | * Copyright (C) 2016 Red Hat, Inc. All Rights Reserved. |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 4 | * Written by David Howells (dhowells@redhat.com) |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or |
David Howells | 8756361 | 2016-04-04 14:00:34 +0100 | [diff] [blame] | 7 | * modify it under the terms of the GNU General Public Licence |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 8 | * as published by the Free Software Foundation; either version |
David Howells | 8756361 | 2016-04-04 14:00:34 +0100 | [diff] [blame] | 9 | * 2 of the Licence, or (at your option) any later version. |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 10 | */ |
| 11 | |
Joe Perches | 9b6d539 | 2016-06-02 12:08:52 -0700 | [diff] [blame] | 12 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 13 | |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 14 | #include <linux/module.h> |
| 15 | #include <linux/net.h> |
| 16 | #include <linux/skbuff.h> |
Tejun Heo | 5a0e3ad | 2010-03-24 17:04:11 +0900 | [diff] [blame] | 17 | #include <linux/slab.h> |
David Howells | 44ba069 | 2015-04-01 16:31:26 +0100 | [diff] [blame] | 18 | #include <linux/udp.h> |
| 19 | #include <linux/ip.h> |
David Howells | 4f95dd7 | 2016-04-04 14:00:35 +0100 | [diff] [blame] | 20 | #include <linux/hashtable.h> |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 21 | #include <net/sock.h> |
David Howells | 5271953 | 2018-10-04 11:10:51 +0100 | [diff] [blame] | 22 | #include <net/udp.h> |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 23 | #include <net/af_rxrpc.h> |
| 24 | #include "ar-internal.h" |
| 25 | |
David Howells | 4f95dd7 | 2016-04-04 14:00:35 +0100 | [diff] [blame] | 26 | static void rxrpc_local_processor(struct work_struct *); |
| 27 | static void rxrpc_local_rcu(struct rcu_head *); |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 28 | |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 29 | /* |
David Howells | 4f95dd7 | 2016-04-04 14:00:35 +0100 | [diff] [blame] | 30 | * Compare a local to an address. Return -ve, 0 or +ve to indicate less than, |
| 31 | * same or greater than. |
| 32 | * |
| 33 | * We explicitly don't compare the RxRPC service ID as we want to reject |
| 34 | * conflicting uses by differing services. Further, we don't want to share |
| 35 | * addresses with different options (IPv6), so we don't compare those bits |
| 36 | * either. |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 37 | */ |
David Howells | 4f95dd7 | 2016-04-04 14:00:35 +0100 | [diff] [blame] | 38 | static long rxrpc_local_cmp_key(const struct rxrpc_local *local, |
| 39 | const struct sockaddr_rxrpc *srx) |
| 40 | { |
| 41 | long diff; |
| 42 | |
| 43 | diff = ((local->srx.transport_type - srx->transport_type) ?: |
| 44 | (local->srx.transport_len - srx->transport_len) ?: |
| 45 | (local->srx.transport.family - srx->transport.family)); |
| 46 | if (diff != 0) |
| 47 | return diff; |
| 48 | |
| 49 | switch (srx->transport.family) { |
| 50 | case AF_INET: |
| 51 | /* If the choice of UDP port is left up to the transport, then |
| 52 | * the endpoint record doesn't match. |
| 53 | */ |
| 54 | return ((u16 __force)local->srx.transport.sin.sin_port - |
| 55 | (u16 __force)srx->transport.sin.sin_port) ?: |
| 56 | memcmp(&local->srx.transport.sin.sin_addr, |
| 57 | &srx->transport.sin.sin_addr, |
| 58 | sizeof(struct in_addr)); |
David Howells | d191274 | 2016-09-17 07:26:01 +0100 | [diff] [blame] | 59 | #ifdef CONFIG_AF_RXRPC_IPV6 |
David Howells | 75b54cb | 2016-09-13 08:49:05 +0100 | [diff] [blame] | 60 | case AF_INET6: |
| 61 | /* If the choice of UDP6 port is left up to the transport, then |
| 62 | * the endpoint record doesn't match. |
| 63 | */ |
| 64 | return ((u16 __force)local->srx.transport.sin6.sin6_port - |
| 65 | (u16 __force)srx->transport.sin6.sin6_port) ?: |
| 66 | memcmp(&local->srx.transport.sin6.sin6_addr, |
| 67 | &srx->transport.sin6.sin6_addr, |
| 68 | sizeof(struct in6_addr)); |
David Howells | d191274 | 2016-09-17 07:26:01 +0100 | [diff] [blame] | 69 | #endif |
David Howells | 4f95dd7 | 2016-04-04 14:00:35 +0100 | [diff] [blame] | 70 | default: |
| 71 | BUG(); |
| 72 | } |
| 73 | } |
| 74 | |
/*
 * Allocate a new local endpoint.
 *
 * The caller receives the initial reference.  The record is initialised from
 * the supplied transport address but is not yet published on any list and has
 * no socket attached.  Returns NULL on allocation failure.
 */
static struct rxrpc_local *rxrpc_alloc_local(struct rxrpc_net *rxnet,
					     const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;

	local = kzalloc(sizeof(struct rxrpc_local), GFP_KERNEL);
	if (local) {
		/* Initial ref belongs to the caller. */
		atomic_set(&local->usage, 1);
		local->rxnet = rxnet;
		INIT_LIST_HEAD(&local->link);
		INIT_WORK(&local->processor, rxrpc_local_processor);
		init_rwsem(&local->defrag_sem);
		skb_queue_head_init(&local->reject_queue);
		skb_queue_head_init(&local->event_queue);
		local->client_conns = RB_ROOT;
		spin_lock_init(&local->client_conns_lock);
		spin_lock_init(&local->lock);
		rwlock_init(&local->services_lock);
		local->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&local->srx, srx, sizeof(*srx));
		/* The service ID is not part of an endpoint's identity (see
		 * rxrpc_local_cmp_key()), so don't retain it here. */
		local->srx.srx_service = 0;
		trace_rxrpc_local(local, rxrpc_local_new, 1, NULL);
	}

	_leave(" = %p", local);
	return local;
}
| 105 | |
/*
 * create the local socket
 * - must be called with rxrpc_local_mutex locked
 *
 * Opens a kernel UDP/UDP6 socket for the endpoint, marks it as an RxRPC
 * encapsulation socket so packets are delivered straight to
 * rxrpc_input_packet(), optionally binds the supplied address and then sets
 * the options needed for ICMP error reporting, path-MTU discovery and (IPv4)
 * receive timestamps.  Returns 0 or a negative error code; on failure the
 * socket is shut down and released.
 */
static int rxrpc_open_socket(struct rxrpc_local *local, struct net *net)
{
	struct sock *usk;
	int ret, opt;

	_enter("%p{%d,%d}",
	       local, local->srx.transport_type, local->srx.transport.family);

	/* create a socket to represent the local endpoint */
	ret = sock_create_kern(net, local->srx.transport.family,
			       local->srx.transport_type, 0, &local->socket);
	if (ret < 0) {
		_leave(" = %d [socket]", ret);
		return ret;
	}

	/* set the socket up */
	usk = local->socket->sk;
	inet_sk(usk)->mc_loop = 0;

	/* Enable CHECKSUM_UNNECESSARY to CHECKSUM_COMPLETE conversion */
	inet_inc_convert_csum(usk);

	rcu_assign_sk_user_data(usk, local);

	/* Hook the socket up as a UDP encapsulation socket so that incoming
	 * packets are handed to rxrpc_input_packet() directly. */
	udp_sk(usk)->encap_type = UDP_ENCAP_RXRPC;
	udp_sk(usk)->encap_rcv = rxrpc_input_packet;
	udp_sk(usk)->encap_destroy = NULL;
	udp_sk(usk)->gro_receive = NULL;
	udp_sk(usk)->gro_complete = NULL;

	udp_encap_enable();
#if IS_ENABLED(CONFIG_AF_RXRPC_IPV6)
	if (local->srx.transport.family == AF_INET6)
		udpv6_encap_enable();
#endif
	usk->sk_error_report = rxrpc_error_report;

	/* if a local address was supplied then bind it */
	if (local->srx.transport_len > sizeof(sa_family_t)) {
		_debug("bind");
		ret = kernel_bind(local->socket,
				  (struct sockaddr *)&local->srx.transport,
				  local->srx.transport_len);
		if (ret < 0) {
			_debug("bind failed %d", ret);
			goto error;
		}
	}

	switch (local->srx.transport.family) {
	case AF_INET6:
		/* we want to receive ICMPv6 errors */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* we want to set the don't fragment bit */
		opt = IPV6_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IPV6, IPV6_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* Fall through and set IPv4 options too otherwise we don't get
		 * errors from IPv4 packets sent through the IPv6 socket.
		 */

	case AF_INET:
		/* we want to receive ICMP errors */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_RECVERR,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* we want to set the don't fragment bit */
		opt = IP_PMTUDISC_DO;
		ret = kernel_setsockopt(local->socket, SOL_IP, IP_MTU_DISCOVER,
					(char *) &opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}

		/* We want receive timestamps. */
		opt = 1;
		ret = kernel_setsockopt(local->socket, SOL_SOCKET, SO_TIMESTAMPNS,
					(char *)&opt, sizeof(opt));
		if (ret < 0) {
			_debug("setsockopt failed");
			goto error;
		}
		break;

	default:
		BUG();
	}

	_leave(" = 0");
	return 0;

error:
	/* Tear the partially-configured socket back down. */
	kernel_sock_shutdown(local->socket, SHUT_RDWR);
	local->socket->sk->sk_user_data = NULL;
	sock_release(local->socket);
	local->socket = NULL;

	_leave(" = %d", ret);
	return ret;
}
| 229 | |
/*
 * Look up or create a new local endpoint using the specified local address.
 *
 * The per-net endpoint list is kept sorted by rxrpc_local_cmp_key().  A
 * still-live match is reused; a match whose usage count has already hit zero
 * is unlinked and replaced in place.  Returns a referenced endpoint or an
 * ERR_PTR() (-EADDRINUSE if a service asks for an address already in use,
 * -ENOMEM, or a socket-setup error).
 */
struct rxrpc_local *rxrpc_lookup_local(struct net *net,
				       const struct sockaddr_rxrpc *srx)
{
	struct rxrpc_local *local;
	struct rxrpc_net *rxnet = rxrpc_net(net);
	struct list_head *cursor;
	const char *age;
	long diff;
	int ret;

	_enter("{%d,%d,%pISp}",
	       srx->transport_type, srx->transport.family, &srx->transport);

	mutex_lock(&rxnet->local_mutex);

	/* Walk the sorted list for a match or the insertion point. */
	for (cursor = rxnet->local_endpoints.next;
	     cursor != &rxnet->local_endpoints;
	     cursor = cursor->next) {
		local = list_entry(cursor, struct rxrpc_local, link);

		diff = rxrpc_local_cmp_key(local, srx);
		if (diff < 0)
			continue;
		if (diff > 0)
			break;

		/* Services aren't allowed to share transport sockets, so
		 * reject that here.  It is possible that the object is dying -
		 * but it may also still have the local transport address that
		 * we want bound.
		 */
		if (srx->srx_service) {
			local = NULL;
			goto addr_in_use;
		}

		/* Found a match.  We replace a dying object.  Attempting to
		 * bind the transport socket may still fail if we're attempting
		 * to use a local address that the dying object is still using.
		 */
		if (!rxrpc_get_local_maybe(local)) {
			/* Usage already reached zero: unlink the corpse and
			 * insert the replacement where it sat. */
			cursor = cursor->next;
			list_del_init(&local->link);
			break;
		}

		age = "old";
		goto found;
	}

	local = rxrpc_alloc_local(rxnet, srx);
	if (!local)
		goto nomem;

	ret = rxrpc_open_socket(local, net);
	if (ret < 0)
		goto sock_error;

	/* Insert before the cursor to keep the list sorted. */
	list_add_tail(&local->link, cursor);
	age = "new";

found:
	mutex_unlock(&rxnet->local_mutex);

	_net("LOCAL %s %d {%pISp}",
	     age, local->debug_id, &local->srx.transport);

	_leave(" = %p", local);
	return local;

nomem:
	ret = -ENOMEM;
sock_error:
	mutex_unlock(&rxnet->local_mutex);
	/* The endpoint was never published, so plain kfree() suffices here -
	 * no RCU grace period or worker teardown is needed. */
	kfree(local);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

addr_in_use:
	mutex_unlock(&rxnet->local_mutex);
	_leave(" = -EADDRINUSE");
	return ERR_PTR(-EADDRINUSE);
}
| 316 | |
| 317 | /* |
David Howells | 09d2bf5 | 2018-03-30 21:05:28 +0100 | [diff] [blame] | 318 | * Get a ref on a local endpoint. |
| 319 | */ |
| 320 | struct rxrpc_local *rxrpc_get_local(struct rxrpc_local *local) |
| 321 | { |
| 322 | const void *here = __builtin_return_address(0); |
| 323 | int n; |
| 324 | |
| 325 | n = atomic_inc_return(&local->usage); |
| 326 | trace_rxrpc_local(local, rxrpc_local_got, n, here); |
| 327 | return local; |
| 328 | } |
| 329 | |
| 330 | /* |
| 331 | * Get a ref on a local endpoint unless its usage has already reached 0. |
| 332 | */ |
| 333 | struct rxrpc_local *rxrpc_get_local_maybe(struct rxrpc_local *local) |
| 334 | { |
| 335 | const void *here = __builtin_return_address(0); |
| 336 | |
| 337 | if (local) { |
Mark Rutland | bfc18e3 | 2018-06-21 13:13:04 +0100 | [diff] [blame] | 338 | int n = atomic_fetch_add_unless(&local->usage, 1, 0); |
David Howells | 09d2bf5 | 2018-03-30 21:05:28 +0100 | [diff] [blame] | 339 | if (n > 0) |
| 340 | trace_rxrpc_local(local, rxrpc_local_got, n + 1, here); |
| 341 | else |
| 342 | local = NULL; |
| 343 | } |
| 344 | return local; |
| 345 | } |
| 346 | |
| 347 | /* |
| 348 | * Queue a local endpoint. |
| 349 | */ |
| 350 | void rxrpc_queue_local(struct rxrpc_local *local) |
| 351 | { |
| 352 | const void *here = __builtin_return_address(0); |
| 353 | |
| 354 | if (rxrpc_queue_work(&local->processor)) |
| 355 | trace_rxrpc_local(local, rxrpc_local_queued, |
| 356 | atomic_read(&local->usage), here); |
| 357 | } |
| 358 | |
/*
 * A local endpoint reached its end of life.
 *
 * Actual teardown is deferred to the work item: closing the socket may
 * sleep, which is not permitted in every context a put can occur in.  The
 * processor notices usage == 0 and calls rxrpc_local_destroyer().
 */
static void __rxrpc_put_local(struct rxrpc_local *local)
{
	_enter("%d", local->debug_id);
	rxrpc_queue_work(&local->processor);
}
| 367 | |
| 368 | /* |
David Howells | 09d2bf5 | 2018-03-30 21:05:28 +0100 | [diff] [blame] | 369 | * Drop a ref on a local endpoint. |
| 370 | */ |
| 371 | void rxrpc_put_local(struct rxrpc_local *local) |
| 372 | { |
| 373 | const void *here = __builtin_return_address(0); |
| 374 | int n; |
| 375 | |
| 376 | if (local) { |
| 377 | n = atomic_dec_return(&local->usage); |
| 378 | trace_rxrpc_local(local, rxrpc_local_put, n, here); |
| 379 | |
| 380 | if (n == 0) |
| 381 | __rxrpc_put_local(local); |
| 382 | } |
| 383 | } |
| 384 | |
/*
 * Destroy a local endpoint's socket and then hand the record to RCU to dispose
 * of.
 *
 * Closing the socket cannot be done from bottom half context or RCU callback
 * context because it might sleep.
 */
static void rxrpc_local_destroyer(struct rxrpc_local *local)
{
	struct socket *socket = local->socket;
	struct rxrpc_net *rxnet = local->rxnet;

	_enter("%d", local->debug_id);

	/* We can get a race between an incoming call packet queueing the
	 * processor again and the work processor starting the destruction
	 * process which will shut down the UDP socket.
	 */
	if (local->dead) {
		_leave(" [already dead]");
		return;
	}
	local->dead = true;

	/* Unpublish the endpoint so no new lookup can find and revive it. */
	mutex_lock(&rxnet->local_mutex);
	list_del_init(&local->link);
	mutex_unlock(&rxnet->local_mutex);

	/* By this point nothing should still be attached to the endpoint. */
	ASSERT(RB_EMPTY_ROOT(&local->client_conns));
	ASSERT(!local->service);

	if (socket) {
		local->socket = NULL;
		kernel_sock_shutdown(socket, SHUT_RDWR);
		socket->sk->sk_user_data = NULL;
		sock_release(socket);
	}

	/* At this point, there should be no more packets coming in to the
	 * local endpoint.
	 */
	rxrpc_purge_queue(&local->reject_queue);
	rxrpc_purge_queue(&local->event_queue);

	/* Defer freeing past a grace period so that concurrent RCU readers of
	 * the socket's sk_user_data can't see freed memory. */
	_debug("rcu local %d", local->debug_id);
	call_rcu(&local->rcu, rxrpc_local_rcu);
}
| 432 | |
| 433 | /* |
| 434 | * Process events on an endpoint |
| 435 | */ |
| 436 | static void rxrpc_local_processor(struct work_struct *work) |
| 437 | { |
| 438 | struct rxrpc_local *local = |
| 439 | container_of(work, struct rxrpc_local, processor); |
| 440 | bool again; |
| 441 | |
David Howells | 09d2bf5 | 2018-03-30 21:05:28 +0100 | [diff] [blame] | 442 | trace_rxrpc_local(local, rxrpc_local_processing, |
| 443 | atomic_read(&local->usage), NULL); |
David Howells | 4f95dd7 | 2016-04-04 14:00:35 +0100 | [diff] [blame] | 444 | |
| 445 | do { |
| 446 | again = false; |
| 447 | if (atomic_read(&local->usage) == 0) |
| 448 | return rxrpc_local_destroyer(local); |
| 449 | |
David Howells | 4f95dd7 | 2016-04-04 14:00:35 +0100 | [diff] [blame] | 450 | if (!skb_queue_empty(&local->reject_queue)) { |
| 451 | rxrpc_reject_packets(local); |
| 452 | again = true; |
| 453 | } |
| 454 | |
| 455 | if (!skb_queue_empty(&local->event_queue)) { |
| 456 | rxrpc_process_local_events(local); |
| 457 | again = true; |
| 458 | } |
| 459 | } while (again); |
| 460 | } |
| 461 | |
/*
 * Destroy a local endpoint after the RCU grace period expires.
 *
 * Runs in RCU callback context; must not sleep.  The socket has already been
 * released by rxrpc_local_destroyer(), so only the record itself is freed.
 */
static void rxrpc_local_rcu(struct rcu_head *rcu)
{
	struct rxrpc_local *local = container_of(rcu, struct rxrpc_local, rcu);

	_enter("%d", local->debug_id);

	/* The processor scheduled this callback, so it must have finished. */
	ASSERT(!work_pending(&local->processor));

	_net("DESTROY LOCAL %d", local->debug_id);
	kfree(local);
	_leave("");
}
| 477 | |
| 478 | /* |
David Howells | 4f95dd7 | 2016-04-04 14:00:35 +0100 | [diff] [blame] | 479 | * Verify the local endpoint list is empty by this point. |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 480 | */ |
David Howells | 2baec2c | 2017-05-24 17:02:32 +0100 | [diff] [blame] | 481 | void rxrpc_destroy_all_locals(struct rxrpc_net *rxnet) |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 482 | { |
David Howells | 4f95dd7 | 2016-04-04 14:00:35 +0100 | [diff] [blame] | 483 | struct rxrpc_local *local; |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 484 | |
| 485 | _enter(""); |
| 486 | |
David Howells | dee4636 | 2016-06-27 17:11:19 +0100 | [diff] [blame] | 487 | flush_workqueue(rxrpc_workqueue); |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 488 | |
David Howells | 2baec2c | 2017-05-24 17:02:32 +0100 | [diff] [blame] | 489 | if (!list_empty(&rxnet->local_endpoints)) { |
| 490 | mutex_lock(&rxnet->local_mutex); |
| 491 | list_for_each_entry(local, &rxnet->local_endpoints, link) { |
David Howells | dee4636 | 2016-06-27 17:11:19 +0100 | [diff] [blame] | 492 | pr_err("AF_RXRPC: Leaked local %p {%d}\n", |
| 493 | local, atomic_read(&local->usage)); |
| 494 | } |
David Howells | 2baec2c | 2017-05-24 17:02:32 +0100 | [diff] [blame] | 495 | mutex_unlock(&rxnet->local_mutex); |
David Howells | dee4636 | 2016-06-27 17:11:19 +0100 | [diff] [blame] | 496 | BUG(); |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 497 | } |
David Howells | 17926a7 | 2007-04-26 15:48:28 -0700 | [diff] [blame] | 498 | } |