// SPDX-License-Identifier: GPL-2.0-or-later
/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
				   enum rxrpc_call_completion);

/*
 * Find the peer associated with an ICMP packet.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb,
						     struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* Can we see an ICMP4 packet on an ICMP6 listening socket?  and vice
	 * versa?
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport_len = sizeof(srx->transport.sin);
		srx->transport.family = AF_INET;
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			srx->transport.sin6.sin6_port = serr->port;
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
			srx->transport_len = sizeof(srx->transport.sin);
			srx->transport.family = AF_INET;
			srx->transport.sin.sin_port = serr->port;
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}

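/* A note on the lookup above: peer records are looked up under RCU (hence
 * the _rcu suffix), so the caller must hold the RCU read lock across the
 * lookup and take a proper reference before using the peer beyond that
 * section.  This is the pattern used by rxrpc_error_report() below:
 *
 *	rcu_read_lock();
 *	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
 *	if (peer && !rxrpc_get_peer_maybe(peer))
 *		peer = NULL;
 *	...
 *	rcu_read_unlock();
 */
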
/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;

	_net("Rx ICMP Fragmentation Needed (%d)", mtu);

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}

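/* A worked example of the estimation heuristic above, for the case where the
 * ICMP Fragmentation Needed report carries no next-hop MTU (ee_info == 0):
 * with if_mtu 9000 the estimate halves to 4500; with if_mtu 1500 it backs
 * off to 1400.  The figures are illustrative; either way the result only
 * takes effect if it is below the current peer->mtu.
 */
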
/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_local *local;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	rcu_read_lock();
	local = rcu_dereference_sk_user_data(sk);
	if (unlikely(!local)) {
		rcu_read_unlock();
		return;
	}
	_enter("%p{%d}", sk, local->debug_id);

	/* Clear the outstanding error value on the socket so that it doesn't
	 * cause kernel_sendmsg() to return it later.
	 */
	sock_error(sk);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		rcu_read_unlock();
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_received);
	serr = SKB_EXT_ERR(skb);
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		return;
	}

	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		_leave(" [no peer]");
		return;
	}

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

	if ((serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	     serr->ee.ee_type == ICMP_DEST_UNREACH &&
	     serr->ee.ee_code == ICMP_FRAG_NEEDED)) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_freed);
	rxrpc_put_peer(peer);

	_leave("");
}

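/* Every exit path from rxrpc_error_report() above must drop the RCU read
 * lock and free any error skb it dequeued.  The NULL check on the
 * rcu_dereference_sk_user_data() result matters: the local endpoint may
 * already have been detached from the UDP socket if teardown races with the
 * error report.
 */
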
/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;

	err = ee->ee_errno;

	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
		if (err == EACCES)
			err = EHOSTUNREACH;
		fallthrough;
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	rxrpc_distribute_error(peer, err, compl);
}

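/* Errors of local origin complete the affected calls with
 * RXRPC_CALL_LOCAL_ERROR; everything else is treated as a network error.
 * Note that EACCES from the ICMP6 origin is remapped to EHOSTUNREACH above
 * before falling through to the default handling.
 */
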
/*
 * Distribute an error that occurred on a peer.
 */
static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
				   enum rxrpc_call_completion compl)
{
	struct rxrpc_call *call;

	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		rxrpc_see_call(call);
		rxrpc_set_call_completion(call, compl, 0, -error);
	}
}

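/* ee_errno arrives as a positive errno value, so rxrpc_distribute_error()
 * negates it when completing each call on the peer's error_targets list,
 * matching the kernel's negative-errno convention for call completion.
 */
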
/*
 * Perform keep-alive pings.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer))
			continue;

		if (__rxrpc_use_local(peer->local)) {
			spin_unlock_bh(&rxnet->peer_hash_lock);

			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
			slot = keepalive_at - base;
			_debug("%02x peer %u t=%d {%pISp}",
			       cursor, peer->debug_id, slot, &peer->srx.transport);

			if (keepalive_at <= base ||
			    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
				rxrpc_send_keepalive(peer);
				slot = RXRPC_KEEPALIVE_TIME;
			}

			/* A transmission to this peer occurred since last we
			 * examined it so put it into the appropriate future
			 * bucket.
			 */
			slot += cursor;
			slot &= mask;
			spin_lock_bh(&rxnet->peer_hash_lock);
			list_add_tail(&peer->keepalive_link,
				      &rxnet->peer_keepalive[slot & mask]);
			rxrpc_unuse_local(peer->local);
		}
		rxrpc_put_peer_locked(peer);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}

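/* The keepalive machinery above amounts to a simple time wheel:
 * rxnet->peer_keepalive[] is a power-of-two array of buckets (hence the
 * "& mask"), one bucket per second, with the cursor advancing one bucket
 * per second.  A peer is refiled at bucket
 * (last_tx_at + RXRPC_KEEPALIVE_TIME - base + cursor) & mask, so a peer
 * that has just transmitted comes around for reexamination one full
 * keepalive interval later.
 */
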
/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Remove to a temporary list all the peers that are currently lodged
	 * in expired buckets plus all new peers.
	 *
	 * Everything in the bucket at the cursor is processed this
	 * second; the bucket at cursor + 1 goes at now + 1s and so
	 * on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}