blob: 1734339b6dd0fe0a656b647b5bb38dd0c7412cdb [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Herbert Xu406ef772007-10-08 17:16:30 -07002/*
3 * xfrm_output.c - Common IPsec encapsulation code.
4 *
5 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
Herbert Xu406ef772007-10-08 17:16:30 -07006 */
7
8#include <linux/errno.h>
9#include <linux/module.h>
10#include <linux/netdevice.h>
Herbert Xu862b82c2007-11-13 21:43:11 -080011#include <linux/netfilter.h>
Herbert Xu406ef772007-10-08 17:16:30 -070012#include <linux/skbuff.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090013#include <linux/slab.h>
Herbert Xu406ef772007-10-08 17:16:30 -070014#include <linux/spinlock.h>
Herbert Xu406ef772007-10-08 17:16:30 -070015#include <net/dst.h>
Florian Westphal6d64be32020-05-04 10:06:03 +020016#include <net/icmp.h>
Florian Westphal1de70832019-03-29 21:16:29 +010017#include <net/inet_ecn.h>
Herbert Xu406ef772007-10-08 17:16:30 -070018#include <net/xfrm.h>
19
Florian Westphalf3075f482020-05-04 10:06:08 +020020#if IS_ENABLED(CONFIG_IPV6)
21#include <net/ip6_route.h>
22#include <net/ipv6_stubs.h>
23#endif
24
Florian Westphal1de70832019-03-29 21:16:29 +010025#include "xfrm_inout.h"
26
Eric W. Biederman0c4b51f2015-09-15 20:04:18 -050027static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb);
Florian Westphal0c620e92019-03-29 21:16:25 +010028static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb);
Herbert Xuc6581a42007-11-13 21:43:43 -080029
Steffen Klassert26b20722012-03-21 23:32:39 +000030static int xfrm_skb_check_space(struct sk_buff *skb)
Herbert Xu83815de2007-10-08 17:25:08 -070031{
Eric Dumazetadf30902009-06-02 05:19:30 +000032 struct dst_entry *dst = skb_dst(skb);
Herbert Xu550ade82007-11-13 21:33:01 -080033 int nhead = dst->header_len + LL_RESERVED_SPACE(dst->dev)
Herbert Xu83815de2007-10-08 17:25:08 -070034 - skb_headroom(skb);
Johannes Bergf5184d22008-05-12 20:48:31 -070035 int ntail = dst->dev->needed_tailroom - skb_tailroom(skb);
Herbert Xu83815de2007-10-08 17:25:08 -070036
Herbert Xud01dbeb2008-09-30 02:03:19 -070037 if (nhead <= 0) {
38 if (ntail <= 0)
39 return 0;
40 nhead = 0;
41 } else if (ntail < 0)
42 ntail = 0;
Herbert Xu83815de2007-10-08 17:25:08 -070043
Herbert Xud01dbeb2008-09-30 02:03:19 -070044 return pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC);
Herbert Xu83815de2007-10-08 17:25:08 -070045}
46
/* Children define the path of the packet through the
 * Linux networking.  Thus, destinations are stackable.
 *
 * Detach the current dst from the skb and hand back a reference to its
 * child (the next hop in the stacked xfrm dst chain).
 */
static struct dst_entry *skb_dst_pop(struct sk_buff *skb)
{
	struct dst_entry *child;

	child = dst_clone(xfrm_dst_child(skb_dst(skb)));
	skb_dst_drop(skb);

	return child;
}
58
/* Add encapsulation header.
 *
 * The IP header will be moved forward to make space for the encapsulation
 * header.
 */
static int xfrm4_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int ihl = iph->ihl * 4;

	/* Remember where the inner transport header currently is. */
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	/* The outer network header starts props.header_len bytes earlier. */
	skb_set_network_header(skb, -x->props.header_len);
	/* mac_header is reused to point at the outer protocol field so the
	 * transform (x->type->output) can patch it in later.
	 */
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + ihl;
	__skb_pull(skb, ihl);
	/* Slide the IPv4 header (ihl bytes) forward into the reserved gap. */
	memmove(skb_network_header(skb), iph, ihl);
	return 0;
}
79
Florian Westphal37b9e7e2021-06-11 12:50:11 +020080#if IS_ENABLED(CONFIG_IPV6_MIP6)
/* Find the offset at which a Mobile IPv6 Destination Options header
 * (carrying a Home Address option) should be inserted.
 *
 * Walks the IPv6 extension header chain from the fixed header onward;
 * on return, *nexthdr points at the nexthdr byte immediately preceding
 * the chosen insertion point so the caller can splice in the new header.
 */
static int mip6_destopt_offset(struct xfrm_state *x, struct sk_buff *skb,
			       u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
		(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	const unsigned char *nh = skb_network_header(skb);
	/* Bytes of linear data from the network header to the tail. */
	unsigned int packet_len = skb_tail_pointer(skb) -
				  skb_network_header(skb);
	int found_rhdr = 0;

	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {
		switch (**nexthdr) {
		case NEXTHDR_HOP:
			/* Hop-by-hop must stay first; keep walking. */
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
			/* HAO MUST NOT appear more than once.
			 * XXX: It is better to try to find by the end of
			 * XXX: packet if HAO exists.
			 */
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0) {
				net_dbg_ratelimited("mip6: hao exists already, override\n");
				return offset;
			}

			if (found_rhdr)
				return offset;

			break;
		default:
			/* First non-extension header: insert before it. */
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
	}

	return offset;
}
Florian Westphal848b18f2021-06-11 12:50:12 +0200126
/* Find the offset at which a Mobile IPv6 Routing header should be
 * inserted.
 *
 * Like mip6_destopt_offset(), but stops in front of an existing routing
 * header whose type is not 0 and in front of any destination options
 * header that already carries a Home Address option.  On return,
 * *nexthdr points at the preceding nexthdr byte.
 */
static int mip6_rthdr_offset(struct xfrm_state *x, struct sk_buff *skb,
			     u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
		(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	const unsigned char *nh = skb_network_header(skb);
	unsigned int packet_len = skb_tail_pointer(skb) -
				  skb_network_header(skb);
	int found_rhdr = 0;

	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {
		switch (**nexthdr) {
		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			if (offset + 3 <= packet_len) {
				struct ipv6_rt_hdr *rt;

				rt = (struct ipv6_rt_hdr *)(nh + offset);
				/* Insert before any non-type-0 routing header. */
				if (rt->type != 0)
					return offset;
			}
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
			/* Insert before a dest-opts header carrying HAO. */
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				return offset;

			if (found_rhdr)
				return offset;

			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(nh + offset);
	}

	return offset;
}
Florian Westphal37b9e7e2021-06-11 12:50:11 +0200173#endif
174
/* Return the offset where the IPv6 xfrm header must be inserted,
 * dispatching to the Mobile IPv6 helpers for DSTOPTS/ROUTING transform
 * types and otherwise deferring to the transform's own hdr_offset hook.
 */
static int xfrm6_hdr_offset(struct xfrm_state *x, struct sk_buff *skb, u8 **prevhdr)
{
	switch (x->type->proto) {
#if IS_ENABLED(CONFIG_IPV6_MIP6)
	case IPPROTO_DSTOPTS:
		return mip6_destopt_offset(x, skb, prevhdr);
	case IPPROTO_ROUTING:
		return mip6_rthdr_offset(x, skb, prevhdr);
#endif
	default:
		break;
	}

	return x->type->hdr_offset(x, skb, prevhdr);
}
190
/* Add encapsulation header.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the encapsulation header.
 */
static int xfrm6_transport_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	/* hdr_len = bytes (fixed header + mutable ext headers) that must
	 * precede the inserted transform header.
	 */
	hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	/* mac_header points at the nexthdr byte to patch later. */
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	/* Slide the IPv6 header + mutable ext headers into the gap. */
	memmove(ipv6_hdr(skb), iph, hdr_len);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}
221
/* Add route optimization header space.
 *
 * The IP header and mutable extension headers will be moved forward to make
 * space for the route optimization header.
 */
static int xfrm6_ro_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	struct ipv6hdr *iph;
	u8 *prevhdr;
	int hdr_len;

	iph = ipv6_hdr(skb);

	hdr_len = xfrm6_hdr_offset(x, skb, &prevhdr);
	if (hdr_len < 0)
		return hdr_len;
	skb_set_mac_header(skb,
			   (prevhdr - x->props.header_len) - skb->data);
	skb_set_network_header(skb, -x->props.header_len);
	skb->transport_header = skb->network_header + hdr_len;
	__skb_pull(skb, hdr_len);
	memmove(ipv6_hdr(skb), iph, hdr_len);

	/* Record last use for this route-optimization state. */
	x->lastused = ktime_get_real_seconds();

	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}
254
/* Add encapsulation header.
 *
 * The top IP header will be constructed per draft-nikander-esp-beet-mode-06.txt.
 */
static int xfrm4_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_beet_phdr *ph;
	struct iphdr *top_iph;
	int hdrlen, optlen;

	hdrlen = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	/* Inner IP options are carried in a BEET pseudo header; reserve
	 * extra room for it (padded to the pseudo header's alignment).
	 */
	if (unlikely(optlen))
		hdrlen += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdrlen +
			       (XFRM_MODE_SKB_CB(skb)->ihl - sizeof(*top_iph)));
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);

	xfrm4_beet_make_header(skb);

	/* ph now points where the pseudo header (if any) will live. */
	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdrlen);

	top_iph = ip_hdr(skb);

	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->protocol;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->protocol = IPPROTO_BEETPH;
		top_iph->ihl = sizeof(struct iphdr) / 4;
	}

	/* BEET: addresses come from the SA, not the inner packet. */
	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;

	return 0;
}
303
/* Add encapsulation header.
 *
 * The top IP header will be constructed per RFC 2401.
 */
static int xfrm4_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct iphdr *top_iph;
	int flags;

	/* Preserve inner header offsets for GSO/offload paths. */
	skb_set_inner_network_header(skb, skb_network_offset(skb));
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct iphdr, protocol);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ip_hdr(skb);

	top_iph->ihl = 5;
	top_iph->version = 4;

	/* Inner family (IPIP vs IPv6-in-IPv4) decides the protocol. */
	top_iph->protocol = xfrm_af2proto(skb_dst(skb)->ops->family);

	/* DS disclosing depends on XFRM_SA_XFLAG_DONT_ENCAP_DSCP */
	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		top_iph->tos = 0;
	else
		top_iph->tos = XFRM_MODE_SKB_CB(skb)->tos;
	top_iph->tos = INET_ECN_encapsulate(top_iph->tos,
					    XFRM_MODE_SKB_CB(skb)->tos);

	flags = x->props.flags;
	if (flags & XFRM_STATE_NOECN)
		IP_ECN_clear(top_iph);

	/* Copy DF from the inner packet unless PMTU discovery is off. */
	top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ?
			    0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF));

	top_iph->ttl = ip4_dst_hoplimit(xfrm_dst_child(dst));

	top_iph->saddr = x->props.saddr.a4;
	top_iph->daddr = x->id.daddr.a4;
	ip_select_ident(dev_net(dst->dev), skb, NULL);

	return 0;
}
351
352#if IS_ENABLED(CONFIG_IPV6)
/* Build the outer IPv6 header for tunnel mode, mirroring
 * xfrm4_tunnel_encap_add() for the IPv6 outer family.
 */
static int xfrm6_tunnel_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *top_iph;
	int dsfield;

	/* Preserve inner header offsets for GSO/offload paths. */
	skb_set_inner_network_header(skb, skb_network_offset(skb));
	skb_set_inner_transport_header(skb, skb_transport_offset(skb));

	skb_set_network_header(skb, -x->props.header_len);
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	top_iph = ipv6_hdr(skb);

	top_iph->version = 6;

	/* Flow label was captured from the inner packet earlier. */
	memcpy(top_iph->flow_lbl, XFRM_MODE_SKB_CB(skb)->flow_lbl,
	       sizeof(top_iph->flow_lbl));
	top_iph->nexthdr = xfrm_af2proto(skb_dst(skb)->ops->family);

	if (x->props.extra_flags & XFRM_SA_XFLAG_DONT_ENCAP_DSCP)
		dsfield = 0;
	else
		dsfield = XFRM_MODE_SKB_CB(skb)->tos;
	dsfield = INET_ECN_encapsulate(dsfield, XFRM_MODE_SKB_CB(skb)->tos);
	if (x->props.flags & XFRM_STATE_NOECN)
		dsfield &= ~INET_ECN_MASK;
	ipv6_change_dsfield(top_iph, 0, dsfield);
	top_iph->hop_limit = ip6_dst_hoplimit(xfrm_dst_child(dst));
	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}
387
/* Build the outer IPv6 header for BEET mode; inner IPv4 options (if the
 * inner family was IPv4) are carried in a BEET pseudo header.
 */
static int xfrm6_beet_encap_add(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipv6hdr *top_iph;
	struct ip_beet_phdr *ph;
	int optlen, hdr_len;

	hdr_len = 0;
	optlen = XFRM_MODE_SKB_CB(skb)->optlen;
	if (unlikely(optlen))
		hdr_len += IPV4_BEET_PHMAXLEN - (optlen & 4);

	skb_set_network_header(skb, -x->props.header_len - hdr_len);
	if (x->sel.family != AF_INET6)
		skb->network_header += IPV4_BEET_PHMAXLEN;
	skb->mac_header = skb->network_header +
			  offsetof(struct ipv6hdr, nexthdr);
	skb->transport_header = skb->network_header + sizeof(*top_iph);
	ph = __skb_pull(skb, XFRM_MODE_SKB_CB(skb)->ihl - hdr_len);

	xfrm6_beet_make_header(skb);

	top_iph = ipv6_hdr(skb);
	if (unlikely(optlen)) {
		if (WARN_ON(optlen < 0))
			return -EINVAL;

		ph->padlen = 4 - (optlen & 4);
		ph->hdrlen = optlen / 8;
		ph->nexthdr = top_iph->nexthdr;
		if (ph->padlen)
			memset(ph + 1, IPOPT_NOP, ph->padlen);

		top_iph->nexthdr = IPPROTO_BEETPH;
	}

	/* BEET: addresses come from the SA, not the inner packet. */
	top_iph->saddr = *(struct in6_addr *)&x->props.saddr;
	top_iph->daddr = *(struct in6_addr *)&x->id.daddr;
	return 0;
}
427#endif
428
429/* Add encapsulation header.
430 *
431 * On exit, the transport header will be set to the start of the
432 * encapsulation header to be filled in by x->type->output and the mac
433 * header will be set to the nextheader (protocol for IPv4) field of the
434 * extension header directly preceding the encapsulation header, or in
435 * its absence, that of the top IP header.
436 * The value of the network header will always point to the top IP header
437 * while skb->data will point to the payload.
438 */
Florian Westphal0c620e92019-03-29 21:16:25 +0100439static int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
440{
441 int err;
442
443 err = xfrm_inner_extract_output(x, skb);
444 if (err)
445 return err;
446
447 IPCB(skb)->flags |= IPSKB_XFRM_TUNNEL_SIZE;
448 skb->protocol = htons(ETH_P_IP);
449
Florian Westphalc9500d72019-03-29 21:16:32 +0100450 switch (x->outer_mode.encap) {
Florian Westphal1de70832019-03-29 21:16:29 +0100451 case XFRM_MODE_BEET:
452 return xfrm4_beet_encap_add(x, skb);
453 case XFRM_MODE_TUNNEL:
454 return xfrm4_tunnel_encap_add(x, skb);
455 }
456
457 WARN_ON_ONCE(1);
458 return -EOPNOTSUPP;
Florian Westphal0c620e92019-03-29 21:16:25 +0100459}
460
/* Extract the inner packet's metadata, then build the IPv6 outer header
 * for the state's encapsulation mode (BEET or tunnel).
 */
static int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	int err;

	err = xfrm_inner_extract_output(x, skb);
	if (err)
		return err;

	/* Tunnel-mode IPv6 output handles fragmentation itself. */
	skb->ignore_df = 1;
	skb->protocol = htons(ETH_P_IPV6);

	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
		return xfrm6_beet_encap_add(x, skb);
	case XFRM_MODE_TUNNEL:
		return xfrm6_tunnel_encap_add(x, skb);
	default:
		WARN_ON_ONCE(1);
		return -EOPNOTSUPP;
	}
#endif
	/* Reached only when IPv6 support is compiled out. */
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
}
486
/* Dispatch outer-header construction based on the state's encapsulation
 * mode and outer address family.  Returns 0 on success, -EOPNOTSUPP for
 * unsupported mode/family combinations.
 */
static int xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_BEET:
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return xfrm4_prepare_output(x, skb);
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_prepare_output(x, skb);
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return xfrm4_transport_output(x, skb);
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_transport_output(x, skb);
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
		/* Route optimization is IPv6 (Mobile IPv6) only. */
		if (x->outer_mode.family == AF_INET6)
			return xfrm6_ro_output(x, skb);
		WARN_ON_ONCE(1);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	return -EOPNOTSUPP;
}
515
516#if IS_ENABLED(CONFIG_NET_PKTGEN)
/* Thin exported wrapper so pktgen can exercise the (otherwise static)
 * outer-mode output path.
 */
int pktgen_xfrm_outer_mode_output(struct xfrm_state *x, struct sk_buff *skb)
{
	return xfrm_outer_mode_output(x, skb);
}
EXPORT_SYMBOL_GPL(pktgen_xfrm_outer_mode_output);
522#endif
523
/* Apply one (or, for non-tunnel chains, several) xfrm transform(s) to skb.
 *
 * Called with err > 0 to start processing, or err <= 0 to resume after an
 * asynchronous transform completed (jumping straight to the resume label).
 * Returns 0 when the packet is ready for the next layer, -EINPROGRESS
 * semantics are folded into the caller via the out label, and any other
 * error means the skb has been consumed (freed).
 */
static int xfrm_output_one(struct sk_buff *skb, int err)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;
	struct net *net = xs_net(x);

	if (err <= 0)
		goto resume;

	do {
		/* Make room for the outer headers/trailer first. */
		err = xfrm_skb_check_space(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			goto error_nolock;
		}

		skb->mark = xfrm_smark_get(skb->mark, x);

		err = xfrm_outer_mode_output(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEMODEERROR);
			goto error_nolock;
		}

		/* State checks, replay counter and lifetime accounting must
		 * be done atomically under the state lock.
		 */
		spin_lock_bh(&x->lock);

		if (unlikely(x->km.state != XFRM_STATE_VALID)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
			err = -EINVAL;
			goto error;
		}

		err = xfrm_state_check_expire(x);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEEXPIRED);
			goto error;
		}

		/* Advance the outbound sequence number (replay window). */
		err = x->repl->overflow(x, skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATESEQERROR);
			goto error;
		}

		x->curlft.bytes += skb->len;
		x->curlft.packets++;

		spin_unlock_bh(&x->lock);

		skb_dst_force(skb);
		if (!skb_dst(skb)) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}

		if (xfrm_offload(skb)) {
			/* Hardware offload: only build the encap header. */
			x->type_offload->encap(x, skb);
		} else {
			/* Inner headers are invalid now. */
			skb->encapsulation = 0;

			err = x->type->output(x, skb);
			if (err == -EINPROGRESS)
				goto out;
		}

resume:
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			goto error_nolock;
		}

		/* Step to the next dst/state in the stacked bundle. */
		dst = skb_dst_pop(skb);
		if (!dst) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			err = -EHOSTUNREACH;
			goto error_nolock;
		}
		skb_dst_set(skb, dst);
		x = dst->xfrm;
	} while (x && !(x->outer_mode.flags & XFRM_MODE_FLAG_TUNNEL));

	return 0;

error:
	spin_unlock_bh(&x->lock);
error_nolock:
	kfree_skb(skb);
out:
	return err;
}
616
/* Continue output processing of skb, either freshly (err == 1 from
 * xfrm_output2) or after an async crypto completion.  Loops while the
 * packet still has xfrm state attached, re-entering the local-out and
 * POST_ROUTING netfilter hooks between transforms.
 */
int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
{
	struct net *net = xs_net(skb_dst(skb)->xfrm);

	while (likely((err = xfrm_output_one(skb, err)) == 0)) {
		/* Conntrack state refers to the pre-transform packet. */
		nf_reset_ct(skb);

		err = skb_dst(skb)->ops->local_out(net, sk, skb);
		if (unlikely(err != 1))
			goto out;

		/* No more transforms: hand off to normal output. */
		if (!skb_dst(skb)->xfrm)
			return dst_output(net, sk, skb);

		err = nf_hook(skb_dst(skb)->ops->family,
			      NF_INET_POST_ROUTING, net, sk, skb,
			      NULL, skb_dst(skb)->dev, xfrm_output2);
		if (unlikely(err != 1))
			goto out;
	}

	/* Async in progress is not an error from the caller's view. */
	if (err == -EINPROGRESS)
		err = 0;

out:
	return err;
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);
Herbert Xu862b82c2007-11-13 21:43:11 -0800645
/* okfn for the netfilter hook: (re)start transform processing; the 1
 * tells xfrm_output_resume this is a fresh entry, not a resumption.
 */
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return xfrm_output_resume(sk, skb, 1);
}
650
/* Software-segment a GSO skb and push each resulting segment through
 * xfrm_output2() individually.  Consumes skb; on error the not-yet-sent
 * remainder of the segment list is freed.
 */
static int xfrm_output_gso(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct sk_buff *segs, *nskb;

	/* The IP/IPv6 control blocks must survive in the GSO cb area. */
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_GSO_CB_OFFSET);
	BUILD_BUG_ON(sizeof(*IP6CB(skb)) > SKB_GSO_CB_OFFSET);
	segs = skb_gso_segment(skb, 0);
	kfree_skb(skb);
	if (IS_ERR(segs))
		return PTR_ERR(segs);
	if (segs == NULL)
		return -EINVAL;

	skb_list_walk_safe(segs, segs, nskb) {
		int err;

		skb_mark_not_on_list(segs);
		err = xfrm_output2(net, sk, segs);

		if (unlikely(err)) {
			/* Drop the remaining, unsent segments. */
			kfree_skb_list(nskb);
			return err;
		}
	}

	return 0;
}
Herbert Xuc6581a42007-11-13 21:43:43 -0800678
/* Main entry point for transmitting a packet through its xfrm bundle.
 *
 * Prepares the per-family control block, sets up the secpath for crypto
 * hardware offload when available, segments GSO packets that cannot be
 * offloaded whole, resolves partial checksums, and finally enters the
 * transform loop via xfrm_output2().  Consumes skb on error.
 */
int xfrm_output(struct sock *sk, struct sk_buff *skb)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct xfrm_state *x = skb_dst(skb)->xfrm;
	int err;

	switch (x->outer_mode.family) {
	case AF_INET:
		memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
		IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
		break;
	case AF_INET6:
		memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));

		IP6CB(skb)->flags |= IP6SKB_XFRM_TRANSFORMED;
		break;
	}

	secpath_reset(skb);

	if (xfrm_dev_offload_ok(skb, x)) {
		struct sec_path *sp;

		/* Record the state in the secpath so the driver can find
		 * it when performing the crypto offload.
		 */
		sp = secpath_set(skb);
		if (!sp) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return -ENOMEM;
		}
		skb->encapsulation = 1;

		sp->olen++;
		sp->xvec[sp->len++] = x;
		xfrm_state_hold(x);

		if (skb_is_gso(skb)) {
			/* Tunneled GSO must be segmented in software. */
			if (skb->inner_protocol)
				return xfrm_output_gso(net, sk, skb);

			skb_shinfo(skb)->gso_type |= SKB_GSO_ESP;
			goto out;
		}

		if (x->xso.dev && x->xso.dev->features & NETIF_F_HW_ESP_TX_CSUM)
			goto out;
	} else {
		if (skb_is_gso(skb))
			return xfrm_output_gso(net, sk, skb);
	}

	/* Checksum must be final before the payload is encrypted. */
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		err = skb_checksum_help(skb);
		if (err) {
			XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
			kfree_skb(skb);
			return err;
		}
	}

out:
	return xfrm_output2(net, sk, skb);
}
EXPORT_SYMBOL_GPL(xfrm_output);
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -0700742
Florian Westphal6d64be32020-05-04 10:06:03 +0200743static int xfrm4_tunnel_check_size(struct sk_buff *skb)
744{
745 int mtu, ret = 0;
746
747 if (IPCB(skb)->flags & IPSKB_XFRM_TUNNEL_SIZE)
748 goto out;
749
750 if (!(ip_hdr(skb)->frag_off & htons(IP_DF)) || skb->ignore_df)
751 goto out;
752
753 mtu = dst_mtu(skb_dst(skb));
754 if ((!skb_is_gso(skb) && skb->len > mtu) ||
755 (skb_is_gso(skb) &&
756 !skb_gso_validate_network_len(skb, ip_skb_dst_mtu(skb->sk, skb)))) {
757 skb->protocol = htons(ETH_P_IP);
758
759 if (skb->sk)
760 xfrm_local_error(skb, mtu);
761 else
762 icmp_send(skb, ICMP_DEST_UNREACH,
763 ICMP_FRAG_NEEDED, htonl(mtu));
764 ret = -EMSGSIZE;
765 }
766out:
767 return ret;
768}
769
/* Validate an inner IPv4 packet and stash the metadata the encapsulation
 * step needs (protocol, tos, frag_off, ihl via xfrm4_extract_header).
 */
static int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	/* BEET has no room to carry an inner fragment header. */
	if (x->outer_mode.encap == XFRM_MODE_BEET &&
	    ip_is_fragment(ip_hdr(skb))) {
		net_warn_ratelimited("BEET mode doesn't support inner IPv4 fragments\n");
		return -EAFNOSUPPORT;
	}

	err = xfrm4_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ip_hdr(skb)->protocol;

	xfrm4_extract_header(skb);
	return 0;
}
789
Florian Westphalf3075f482020-05-04 10:06:08 +0200790#if IS_ENABLED(CONFIG_IPV6)
791static int xfrm6_tunnel_check_size(struct sk_buff *skb)
792{
793 int mtu, ret = 0;
794 struct dst_entry *dst = skb_dst(skb);
795
796 if (skb->ignore_df)
797 goto out;
798
799 mtu = dst_mtu(dst);
800 if (mtu < IPV6_MIN_MTU)
801 mtu = IPV6_MIN_MTU;
802
803 if ((!skb_is_gso(skb) && skb->len > mtu) ||
804 (skb_is_gso(skb) &&
805 !skb_gso_validate_network_len(skb, ip6_skb_dst_mtu(skb)))) {
806 skb->dev = dst->dev;
807 skb->protocol = htons(ETH_P_IPV6);
808
809 if (xfrm6_local_dontfrag(skb->sk))
810 ipv6_stub->xfrm6_local_rxpmtu(skb, mtu);
811 else if (skb->sk)
812 xfrm_local_error(skb, mtu);
813 else
814 icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
815 ret = -EMSGSIZE;
816 }
817out:
818 return ret;
819}
820#endif
821
/* Validate an inner IPv6 packet and stash the metadata the encapsulation
 * step needs (nexthdr, tos, flow label via xfrm6_extract_header).
 */
static int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_IPV6)
	unsigned int ptr = 0;
	int err;

	/* BEET has no room to carry an inner fragment header. */
	if (x->outer_mode.encap == XFRM_MODE_BEET &&
	    ipv6_find_hdr(skb, &ptr, NEXTHDR_FRAGMENT, NULL, NULL) >= 0) {
		net_warn_ratelimited("BEET mode doesn't support inner IPv6 fragments\n");
		return -EAFNOSUPPORT;
	}

	err = xfrm6_tunnel_check_size(skb);
	if (err)
		return err;

	XFRM_MODE_SKB_CB(skb)->protocol = ipv6_hdr(skb)->nexthdr;

	xfrm6_extract_header(skb);
	return 0;
#else
	WARN_ON_ONCE(1);
	return -EAFNOSUPPORT;
#endif
}
847
Florian Westphal0c620e92019-03-29 21:16:25 +0100848static int xfrm_inner_extract_output(struct xfrm_state *x, struct sk_buff *skb)
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -0700849{
Florian Westphal4c145dc2019-03-29 21:16:31 +0100850 const struct xfrm_mode *inner_mode;
Florian Westphal733a5fa2019-03-29 21:16:30 +0100851
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -0700852 if (x->sel.family == AF_UNSPEC)
853 inner_mode = xfrm_ip2inner_mode(x,
Eric Dumazetadf30902009-06-02 05:19:30 +0000854 xfrm_af2proto(skb_dst(skb)->ops->family));
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -0700855 else
Florian Westphalc9500d72019-03-29 21:16:32 +0100856 inner_mode = &x->inner_mode;
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -0700857
858 if (inner_mode == NULL)
859 return -EAFNOSUPPORT;
Florian Westphal733a5fa2019-03-29 21:16:30 +0100860
Florian Westphal6d64be32020-05-04 10:06:03 +0200861 switch (inner_mode->family) {
862 case AF_INET:
863 return xfrm4_extract_output(x, skb);
Florian Westphalf3075f482020-05-04 10:06:08 +0200864 case AF_INET6:
865 return xfrm6_extract_output(x, skb);
Florian Westphal6d64be32020-05-04 10:06:03 +0200866 }
Florian Westphal733a5fa2019-03-29 21:16:30 +0100867
Florian Westphalf3075f482020-05-04 10:06:08 +0200868 return -EAFNOSUPPORT;
Kazunori MIYAZAWAdf9dcb42008-03-24 14:51:51 -0700869}
870
/* Report a path-MTU style error back to the local sending socket via the
 * per-family afinfo callback.  Only handles locally generated packets
 * (IPv4, or IPv6 packets from an AF_INET6 socket).
 */
void xfrm_local_error(struct sk_buff *skb, int mtu)
{
	unsigned int proto;
	struct xfrm_state_afinfo *afinfo;

	if (skb->protocol == htons(ETH_P_IP))
		proto = AF_INET;
	else if (skb->protocol == htons(ETH_P_IPV6) &&
		 skb->sk->sk_family == AF_INET6)
		proto = AF_INET6;
	else
		return;

	afinfo = xfrm_state_get_afinfo(proto);
	if (afinfo) {
		afinfo->local_error(skb, mtu);
		/* NOTE(review): the unpaired unlock implies
		 * xfrm_state_get_afinfo() returns with the RCU read lock
		 * held on success — confirm against its definition.
		 */
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(xfrm_local_error);