/* Peer event handling, typically ICMP messages.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/errqueue.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include "ar-internal.h"

static void rxrpc_store_error(struct rxrpc_peer *, struct sock_exterr_skb *);
static void rxrpc_distribute_error(struct rxrpc_peer *, int,
				   enum rxrpc_call_completion);

/*
 * Find the peer associated with an ICMP packet.
 */
static struct rxrpc_peer *rxrpc_lookup_peer_icmp_rcu(struct rxrpc_local *local,
						     const struct sk_buff *skb,
						     struct sockaddr_rxrpc *srx)
{
	struct sock_exterr_skb *serr = SKB_EXT_ERR(skb);

	_enter("");

	memset(srx, 0, sizeof(*srx));
	srx->transport_type = local->srx.transport_type;
	srx->transport_len = local->srx.transport_len;
	srx->transport.family = local->srx.transport.family;

	/* An ICMP report for a v4 packet may arrive on a v6 listening socket
	 * and vice versa, so check the error origin as well as the socket's
	 * transport family.
	 */
	switch (srx->transport.family) {
	case AF_INET:
		srx->transport.sin.sin_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP");
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6 on v4 sock");
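			/* An ICMPv6 report against a v4 socket presumably
			 * carries a v4-mapped IPv6 address; the IPv4 address
			 * occupies its last four bytes, hence the +12 below.
			 */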
			memcpy(&srx->transport.sin.sin_addr,
			       skb_network_header(skb) + serr->addr_offset + 12,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin.sin_addr, &ip_hdr(skb)->saddr,
			       sizeof(struct in_addr));
			break;
		}
		break;

#ifdef CONFIG_AF_RXRPC_IPV6
	case AF_INET6:
		srx->transport.sin6.sin6_port = serr->port;
		switch (serr->ee.ee_origin) {
		case SO_EE_ORIGIN_ICMP6:
			_net("Rx ICMP6");
			memcpy(&srx->transport.sin6.sin6_addr,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in6_addr));
			break;
		case SO_EE_ORIGIN_ICMP:
			_net("Rx ICMP on v6 sock");
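			/* Rebuild the v4 source as a v4-mapped IPv6 address
			 * (::ffff:a.b.c.d), presumably to match the form in
			 * which v4 peers are recorded on an AF_INET6
			 * transport.
			 */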
			srx->transport.sin6.sin6_addr.s6_addr32[0] = 0;
			srx->transport.sin6.sin6_addr.s6_addr32[1] = 0;
			srx->transport.sin6.sin6_addr.s6_addr32[2] = htonl(0xffff);
			memcpy(srx->transport.sin6.sin6_addr.s6_addr + 12,
			       skb_network_header(skb) + serr->addr_offset,
			       sizeof(struct in_addr));
			break;
		default:
			memcpy(&srx->transport.sin6.sin6_addr,
			       &ipv6_hdr(skb)->saddr,
			       sizeof(struct in6_addr));
			break;
		}
		break;
#endif

	default:
		BUG();
	}

	return rxrpc_lookup_peer_rcu(local, srx);
}

/*
 * Handle an MTU/fragmentation problem.
 */
static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
{
	u32 mtu = serr->ee.ee_info;

	_net("Rx ICMP Fragmentation Needed (%d)", mtu);

	/* wind down the local interface MTU */
	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
		peer->if_mtu = mtu;
		_net("I/F MTU %u", mtu);
	}

	if (mtu == 0) {
		/* they didn't give us a size, estimate one */
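		/* Halve a jumbo-sized interface MTU, but not below the
		 * Ethernet-sized 1500; otherwise knock 100 bytes off,
		 * flooring at the header size plus a minimal payload
		 * (e.g. 9000 becomes 4500, 1400 becomes 1300).
		 */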
		mtu = peer->if_mtu;
		if (mtu > 1500) {
			mtu >>= 1;
			if (mtu < 1500)
				mtu = 1500;
		} else {
			mtu -= 100;
			if (mtu < peer->hdrsize)
				mtu = peer->hdrsize + 4;
		}
	}

	if (mtu < peer->mtu) {
		spin_lock_bh(&peer->lock);
		peer->mtu = mtu;
		peer->maxdata = peer->mtu - peer->hdrsize;
		spin_unlock_bh(&peer->lock);
		_net("Net MTU %u (maxdata %u)",
		     peer->mtu, peer->maxdata);
	}
}

/*
 * Handle an error received on the local endpoint.
 */
void rxrpc_error_report(struct sock *sk)
{
	struct sock_exterr_skb *serr;
	struct sockaddr_rxrpc srx;
	struct rxrpc_local *local = sk->sk_user_data;
	struct rxrpc_peer *peer;
	struct sk_buff *skb;

	_enter("%p{%d}", sk, local->debug_id);

	skb = sock_dequeue_err_skb(sk);
	if (!skb) {
		_leave("UDP socket errqueue empty");
		return;
	}
	rxrpc_new_skb(skb, rxrpc_skb_rx_received);
	serr = SKB_EXT_ERR(skb);
	if (!skb->len && serr->ee.ee_origin == SO_EE_ORIGIN_TIMESTAMPING) {
		_leave("UDP empty message");
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		return;
	}

	rcu_read_lock();
	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
	if (peer && !rxrpc_get_peer_maybe(peer))
		peer = NULL;
	if (!peer) {
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		_leave(" [no peer]");
		return;
	}

	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);

	if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP &&
	    serr->ee.ee_type == ICMP_DEST_UNREACH &&
	    serr->ee.ee_code == ICMP_FRAG_NEEDED) {
		rxrpc_adjust_mtu(peer, serr);
		rcu_read_unlock();
		rxrpc_free_skb(skb, rxrpc_skb_rx_freed);
		rxrpc_put_peer(peer);
		_leave(" [MTU update]");
		return;
	}

	rxrpc_store_error(peer, serr);
	rcu_read_unlock();
	rxrpc_free_skb(skb, rxrpc_skb_rx_freed);

	_leave("");
}

/*
 * Map an error report to error codes on the peer record.
 */
static void rxrpc_store_error(struct rxrpc_peer *peer,
			      struct sock_exterr_skb *serr)
{
	enum rxrpc_call_completion compl = RXRPC_CALL_NETWORK_ERROR;
	struct sock_extended_err *ee;
	int err;

	_enter("");

	ee = &serr->ee;

	err = ee->ee_errno;

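	/* Note the completion type as we go: ICMP-sourced reports complete
	 * calls as RXRPC_CALL_NETWORK_ERROR (the default set above), while
	 * locally sourced ones complete them as RXRPC_CALL_LOCAL_ERROR.
	 */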
	switch (ee->ee_origin) {
	case SO_EE_ORIGIN_ICMP:
		switch (ee->ee_type) {
		case ICMP_DEST_UNREACH:
			switch (ee->ee_code) {
			case ICMP_NET_UNREACH:
				_net("Rx Received ICMP Network Unreachable");
				break;
			case ICMP_HOST_UNREACH:
				_net("Rx Received ICMP Host Unreachable");
				break;
			case ICMP_PORT_UNREACH:
				_net("Rx Received ICMP Port Unreachable");
				break;
			case ICMP_NET_UNKNOWN:
				_net("Rx Received ICMP Unknown Network");
				break;
			case ICMP_HOST_UNKNOWN:
				_net("Rx Received ICMP Unknown Host");
				break;
			default:
				_net("Rx Received ICMP DestUnreach code=%u",
				     ee->ee_code);
				break;
			}
			break;

		case ICMP_TIME_EXCEEDED:
			_net("Rx Received ICMP TTL Exceeded");
			break;

		default:
			_proto("Rx Received ICMP error { type=%u code=%u }",
			       ee->ee_type, ee->ee_code);
			break;
		}
		break;

	case SO_EE_ORIGIN_NONE:
	case SO_EE_ORIGIN_LOCAL:
		_proto("Rx Received local error { error=%d }", err);
		compl = RXRPC_CALL_LOCAL_ERROR;
		break;

	case SO_EE_ORIGIN_ICMP6:
	default:
		_proto("Rx Received error report { orig=%u }", ee->ee_origin);
		break;
	}

	rxrpc_distribute_error(peer, err, compl);
}

/*
 * Distribute an error that occurred on a peer.
 */
static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
				   enum rxrpc_call_completion compl)
{
	struct rxrpc_call *call;

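	/* ee_errno is a positive errno value, so it is negated here to give
	 * each affected call the usual kernel-style negative error code.
	 */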
	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
		rxrpc_see_call(call);
		if (call->state < RXRPC_CALL_COMPLETE &&
		    rxrpc_set_call_completion(call, compl, 0, -error))
			rxrpc_notify_socket(call);
	}
}

/*
 * Add RTT information to the cache.  This is called in softirq mode and has
 * exclusive access to the peer RTT data.
 */
void rxrpc_peer_add_rtt(struct rxrpc_call *call, enum rxrpc_rtt_rx_trace why,
			rxrpc_serial_t send_serial, rxrpc_serial_t resp_serial,
			ktime_t send_time, ktime_t resp_time)
{
	struct rxrpc_peer *peer = call->peer;
	s64 rtt;
	u64 sum = peer->rtt_sum, avg;
	u8 cursor = peer->rtt_cursor, usage = peer->rtt_usage;

	rtt = ktime_to_ns(ktime_sub(resp_time, send_time));
	if (rtt < 0)
		return;

	spin_lock(&peer->rtt_input_lock);

	/* Replace the oldest datum in the RTT buffer */
	sum -= peer->rtt_cache[cursor];
	sum += rtt;
	peer->rtt_cache[cursor] = rtt;
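	/* Advance the ring cursor; the mask requires RXRPC_RTT_CACHE_SIZE
	 * to be a power of two.
	 */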
	peer->rtt_cursor = (cursor + 1) & (RXRPC_RTT_CACHE_SIZE - 1);
	peer->rtt_sum = sum;
	if (usage < RXRPC_RTT_CACHE_SIZE) {
		usage++;
		peer->rtt_usage = usage;
	}

	spin_unlock(&peer->rtt_input_lock);

	/* Now recalculate the average */
	if (usage == RXRPC_RTT_CACHE_SIZE) {
		avg = sum / RXRPC_RTT_CACHE_SIZE;
	} else {
		avg = sum;
		do_div(avg, usage);
	}

	/* Don't need to update this under lock */
	peer->rtt = avg;
	trace_rxrpc_rtt_rx(call, why, send_serial, resp_serial, rtt,
			   usage, avg);
}

/*
 * Perform keep-alive pings.
 */
static void rxrpc_peer_keepalive_dispatch(struct rxrpc_net *rxnet,
					  struct list_head *collector,
					  time64_t base,
					  u8 cursor)
{
	struct rxrpc_peer *peer;
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t keepalive_at;
	int slot;

	spin_lock_bh(&rxnet->peer_hash_lock);

	while (!list_empty(collector)) {
		peer = list_entry(collector->next,
				  struct rxrpc_peer, keepalive_link);

		list_del_init(&peer->keepalive_link);
		if (!rxrpc_get_peer_maybe(peer))
			continue;

		spin_unlock_bh(&rxnet->peer_hash_lock);

		keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
		slot = keepalive_at - base;
		_debug("%02x peer %u t=%d {%pISp}",
		       cursor, peer->debug_id, slot, &peer->srx.transport);

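		/* If the keepalive is already due, or would fall beyond the
		 * wheel's horizon, ping the peer now and reschedule it a
		 * full keepalive period out.
		 */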
		if (keepalive_at <= base ||
		    keepalive_at > base + RXRPC_KEEPALIVE_TIME) {
			rxrpc_send_keepalive(peer);
			slot = RXRPC_KEEPALIVE_TIME;
		}

		/* A transmission to this peer occurred since we last
		 * examined it, so put it into the appropriate future bucket.
		 */
		slot += cursor;
		slot &= mask;
		spin_lock_bh(&rxnet->peer_hash_lock);
		list_add_tail(&peer->keepalive_link,
			      &rxnet->peer_keepalive[slot & mask]);
		rxrpc_put_peer(peer);
	}

	spin_unlock_bh(&rxnet->peer_hash_lock);
}

/*
 * Perform keep-alive pings with VERSION packets to keep any NAT alive.
 */
void rxrpc_peer_keepalive_worker(struct work_struct *work)
{
	struct rxrpc_net *rxnet =
		container_of(work, struct rxrpc_net, peer_keepalive_work);
	const u8 mask = ARRAY_SIZE(rxnet->peer_keepalive) - 1;
	time64_t base, now, delay;
	u8 cursor, stop;
	LIST_HEAD(collector);

	now = ktime_get_seconds();
	base = rxnet->peer_keepalive_base;
	cursor = rxnet->peer_keepalive_cursor;
	_enter("%lld,%u", base - now, cursor);

	if (!rxnet->live)
		return;

	/* Move to a temporary list all the peers that are currently lodged
	 * in expired buckets, plus all new peers.
	 *
	 * Everything in the bucket at the cursor is processed this
	 * second; the bucket at cursor + 1 goes at now + 1s and so
	 * on...
	 */
	spin_lock_bh(&rxnet->peer_hash_lock);
	list_splice_init(&rxnet->peer_keepalive_new, &collector);

	stop = cursor + ARRAY_SIZE(rxnet->peer_keepalive);
	while (base <= now && (s8)(cursor - stop) < 0) {
		list_splice_tail_init(&rxnet->peer_keepalive[cursor & mask],
				      &collector);
		base++;
		cursor++;
	}

	base = now;
	spin_unlock_bh(&rxnet->peer_hash_lock);

	rxnet->peer_keepalive_base = base;
	rxnet->peer_keepalive_cursor = cursor;
	rxrpc_peer_keepalive_dispatch(rxnet, &collector, base, cursor);
	ASSERT(list_empty(&collector));

	/* Schedule the timer for the next occupied timeslot. */
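	/* (The scan below covers at most RXRPC_KEEPALIVE_TIME - 1 buckets,
	 * advancing base by one second for each empty bucket skipped.)
	 */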
	cursor = rxnet->peer_keepalive_cursor;
	stop = cursor + RXRPC_KEEPALIVE_TIME - 1;
	for (; (s8)(cursor - stop) < 0; cursor++) {
		if (!list_empty(&rxnet->peer_keepalive[cursor & mask]))
			break;
		base++;
	}

	now = ktime_get_seconds();
	delay = base - now;
	if (delay < 1)
		delay = 1;
	delay *= HZ;
	if (rxnet->live)
		timer_reduce(&rxnet->peer_keepalive_timer, jiffies + delay);

	_leave("");
}