// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C)2002 USAGI/WIDE Project
 *
 * Authors
 *
 *	Mitsuru KANDA @USAGI       : IPv6 Support
 *	Kazunori MIYAZAWA @USAGI   :
 *	Kunihiro Ishiguro <kunihiro@ipinfusion.com>
 *
 *	This file is derived from net/ipv4/esp.c
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/ip6_checksum.h>
#include <net/ip6_route.h>
#include <net/icmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/udp.h>
#include <linux/icmpv6.h>
#include <net/tcp.h>
#include <net/espintcp.h>
#include <net/inet6_hashtables.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the upper 32 bits of the sequence number are
 * placed at the front, if present. Followed by the IV, the request and finally
 * the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqihlen)
{
	unsigned int len;

	len = seqihlen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

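/*
 * Rough layout of the scratch buffer returned by esp_alloc_tmp() and carved
 * up by the esp_tmp_*() helpers above (padding between the parts depends on
 * the algorithm's alignment requirements, so offsets are illustrative only):
 *
 *	[ esp_output_extra / seqhi ][ IV ][ aead_request + crypto ctx ][ SG entries ]
 */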
static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(struct esp_output_extra);

	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

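/*
 * ESP over TCP (RFC 8229): the encapsulating TCP connection is set up and
 * owned outside of this module (typically by the IKE daemon, with the
 * espintcp ULP attached).  esp6_find_tcp_sk() looks that socket up in the
 * established hash and caches it in x->encap_sk; the cached entry is dropped
 * when the encapsulation ports configured on the SA change.
 */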
#ifdef CONFIG_INET6_ESPINTCP
struct esp_tcp_sk {
	struct sock *sk;
	struct rcu_head rcu;
};

static void esp_free_tcp_sk(struct rcu_head *head)
{
	struct esp_tcp_sk *esk = container_of(head, struct esp_tcp_sk, rcu);

	sock_put(esk->sk);
	kfree(esk);
}

static struct sock *esp6_find_tcp_sk(struct xfrm_state *x)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct esp_tcp_sk *esk;
	__be16 sport, dport;
	struct sock *nsk;
	struct sock *sk;

	sk = rcu_dereference(x->encap_sk);
	if (sk && sk->sk_state == TCP_ESTABLISHED)
		return sk;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (sk && sk == nsk) {
		esk = kmalloc(sizeof(*esk), GFP_ATOMIC);
		if (!esk) {
			spin_unlock_bh(&x->lock);
			return ERR_PTR(-ENOMEM);
		}
		RCU_INIT_POINTER(x->encap_sk, NULL);
		esk->sk = sk;
		call_rcu(&esk->rcu, esp_free_tcp_sk);
	}
	spin_unlock_bh(&x->lock);

	sk = __inet6_lookup_established(xs_net(x), &tcp_hashinfo, &x->id.daddr.in6,
					dport, &x->props.saddr.in6, ntohs(sport), 0, 0);
	if (!sk)
		return ERR_PTR(-ENOENT);

	if (!tcp_is_ulp_esp(sk)) {
		sock_put(sk);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&x->lock);
	nsk = rcu_dereference_protected(x->encap_sk,
					lockdep_is_held(&x->lock));
	if (encap->encap_sport != sport ||
	    encap->encap_dport != dport) {
		sock_put(sk);
		sk = nsk ?: ERR_PTR(-EREMCHG);
	} else if (sk == nsk) {
		sock_put(sk);
	} else {
		rcu_assign_pointer(x->encap_sk, sk);
	}
	spin_unlock_bh(&x->lock);

	return sk;
}

static int esp_output_tcp_finish(struct xfrm_state *x, struct sk_buff *skb)
{
	struct sock *sk;
	int err;

	rcu_read_lock();

	sk = esp6_find_tcp_sk(x);
	err = PTR_ERR_OR_ZERO(sk);
	if (err)
		goto out;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		err = espintcp_queue_out(sk, skb);
	else
		err = espintcp_push_skb(sk, skb);
	bh_unlock_sock(sk);

out:
	rcu_read_unlock();
	return err;
}

static int esp_output_tcp_encap_cb(struct net *net, struct sock *sk,
				   struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_state *x = dst->xfrm;

	return esp_output_tcp_finish(x, skb);
}

static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;

	local_bh_disable();
	err = xfrm_trans_queue_net(xs_net(x), skb, esp_output_tcp_encap_cb);
	local_bh_enable();

	/* EINPROGRESS just happens to do the right thing.  It
	 * actually means that the skb has been consumed and
	 * isn't coming back.
	 */
	return err ?: -EINPROGRESS;
}
#else
static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb)
{
	kfree_skb(skb);

	return -EOPNOTSUPP;
}
#endif

static void esp_output_encap_csum(struct sk_buff *skb)
{
	/* UDP encap with IPv6 requires a valid checksum */
	if (*skb_mac_header(skb) == IPPROTO_UDP) {
		struct udphdr *uh = udp_hdr(skb);
		struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int len = ntohs(uh->len);
		unsigned int offset = skb_transport_offset(skb);
		__wsum csum = skb_checksum(skb, offset, skb->len - offset, 0);

		uh->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
					    len, IPPROTO_UDP, csum);
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;
	}
}

static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	esp_output_encap_csum(skb);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		if (!err &&
		    x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
			esp_output_tail_tcp(x, skb);
		else
			xfrm_output_resume(skb->sk, skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
				sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
					     struct xfrm_state *x,
					     struct ip_esp_hdr *esph,
					     struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

static struct ip_esp_hdr *esp6_output_udp_encap(struct sk_buff *skb,
						int encap_type,
						struct esp_info *esp,
						__be16 sport,
						__be16 dport)
{
	struct udphdr *uh;
	__be32 *udpdata32;
	unsigned int len;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > U16_MAX)
		return ERR_PTR(-EMSGSIZE);

	uh = (struct udphdr *)esp->esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	*skb_mac_header(skb) = IPPROTO_UDP;

	if (encap_type == UDP_ENCAP_ESPINUDP_NON_IKE) {
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		return (struct ip_esp_hdr *)(udpdata32 + 2);
	}

	return (struct ip_esp_hdr *)(uh + 1);
}

#ifdef CONFIG_INET6_ESPINTCP
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	__be16 *lenp = (void *)esp->esph;
	struct ip_esp_hdr *esph;
	unsigned int len;
	struct sock *sk;

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len > IP_MAX_MTU)
		return ERR_PTR(-EMSGSIZE);

	rcu_read_lock();
	sk = esp6_find_tcp_sk(x);
	rcu_read_unlock();

	if (IS_ERR(sk))
		return ERR_CAST(sk);

	*lenp = htons(len);
	esph = (struct ip_esp_hdr *)(lenp + 1);

	return esph;
}
#else
static struct ip_esp_hdr *esp6_output_tcp_encap(struct xfrm_state *x,
						struct sk_buff *skb,
						struct esp_info *esp)
{
	return ERR_PTR(-EOPNOTSUPP);
}
#endif

static int esp6_output_encap(struct xfrm_state *x, struct sk_buff *skb,
			     struct esp_info *esp)
{
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph;
	__be16 sport, dport;
	int encap_type;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		esph = esp6_output_udp_encap(skb, encap_type, esp, sport, dport);
		break;
	case TCP_ENCAP_ESPINTCP:
		esph = esp6_output_tcp_encap(x, skb, esp);
		break;
	}

	if (IS_ERR(esph))
		return PTR_ERR(esph);

	esp->esph = esph;

	return 0;
}

int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	if (x->encap) {
		int err = esp6_output_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			tail = page_address(page) + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp6_output_head);

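/*
 * esp6_output_tail() builds and submits the AEAD request for the packet
 * prepared by esp6_output_head().  The scratch buffer from esp_alloc_tmp()
 * holds the ESN extra data, the IV, the request and the scatterlists; the
 * IV is filled from the 64-bit sequence number in esp->seqno, and
 * asynchronous completions are handled by esp_output_done{,_esn}().
 */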
int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct aead_request *req;
	struct crypto_aead *aead;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_esn(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
		esp_output_encap_csum(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

	if (!err && x->encap && x->encap->encap_type == TCP_ENCAP_ESPINTCP)
		err = esp_output_tail_tcp(x, skb);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_output_tail);

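/*
 * Trailer sizing in esp6_output() below, e.g. for a 1000 byte payload with a
 * 16 byte cipher block size, no TFC padding and a 16 byte ICV:
 *
 *	clen   = ALIGN(1000 + 2, 16) = 1008	ciphertext length
 *	plen   = 1008 - 1000 - 0     = 8	padding incl. pad length/next header
 *	tailen = 0 + 8 + 16          = 24	bytes appended after the payload
 */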
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, __xfrm_state_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp6_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp6_output_tail(x, skb, &esp);
}

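/*
 * After decryption the packet ends with the ESP trailer:
 *
 *	[ payload ][ padding (padlen bytes) ][ pad length ][ next header ][ ICV (alen bytes) ]
 *
 * esp_remove_trailer() trims it off and returns the next header value,
 * fixing up skb->csum when the device reported CHECKSUM_COMPLETE.
 */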
static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

	ret = skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2);
	BUG_ON(ret);

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}

int esp6_input_done2(struct sk_buff *skb, int err)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int hdr_len = skb_network_header_len(skb);

	if (!xo || !(xo->flags & CRYPTO_DONE))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	if (x->encap) {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		int offset = skb_network_offset(skb) + sizeof(*ip6h);
		struct xfrm_encap_tmpl *encap = x->encap;
		u8 nexthdr = ip6h->nexthdr;
		__be16 frag_off, source;
		struct udphdr *uh;
		struct tcphdr *th;

		offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);

		if (offset < 0) {
			err = -EINVAL;
			goto out;
		}

		uh = (void *)(skb->data + offset);
		th = (void *)(skb->data + offset);
		hdr_len += offset;

		switch (x->encap->encap_type) {
		case TCP_ENCAP_ESPINTCP:
			source = th->source;
			break;
		case UDP_ENCAP_ESPINUDP:
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			source = uh->source;
			break;
		default:
			WARN_ON_ONCE(1);
			err = -EINVAL;
			goto out;
		}

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (!ipv6_addr_equal(&ip6h->saddr, &x->props.saddr.in6) ||
		    source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			memcpy(&ipaddr.a6, &ip6h->saddr.s6_addr, sizeof(ipaddr.a6));
			km_new_mapping(x, &ipaddr, source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_postpull_rcsum(skb, skb_network_header(skb),
			   skb_network_header_len(skb));
	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -hdr_len);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp6_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp6_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		struct ip_esp_hdr *esph = skb_push(skb, 4);

		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
	}
}

static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}

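/*
 * Receive path: esp6_input() maps the packet into a scatterlist, runs the
 * AEAD decryption (shuffling the ESP header first when ESN is in use) and
 * finishes in esp6_input_done2(), either directly or from the asynchronous
 * completion callback.
 */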
static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	int ret = 0;
	void *tmp;
	__be32 *seqhi;
	u8 *iv;
	struct scatterlist *sg;

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen)) {
		ret = -EINVAL;
		goto out;
	}

	if (elen <= 0) {
		ret = -EINVAL;
		goto out;
	}

	assoclen = sizeof(struct ip_esp_hdr);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	if (!skb_cloned(skb)) {
		if (!skb_is_nonlinear(skb)) {
			nfrags = 1;

			goto skip_cow;
		} else if (!skb_has_frag_list(skb)) {
			nfrags = skb_shinfo(skb)->nr_frags;
			nfrags++;

			goto skip_cow;
		}
	}

	nfrags = skb_cow_data(skb, 0, &trailer);
	if (nfrags < 0) {
		ret = -EINVAL;
		goto out;
	}

skip_cow:
	ret = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	esp_input_set_header(skb, seqhi);

	sg_init_table(sg, nfrags);
	ret = skb_to_sgvec(skb, sg, 0, skb->len);
	if (unlikely(ret < 0)) {
		kfree(tmp);
		goto out;
	}

	skb->ip_summed = CHECKSUM_NONE;

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_input_done, skb);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	ret = crypto_aead_decrypt(req);
	if (ret == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	ret = esp6_input_done2(skb, ret);

out:
	return ret;
}

static int esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		    u8 type, u8 code, int offset, __be32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct ipv6hdr *iph = (const struct ipv6hdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data + offset);
	struct xfrm_state *x;

	if (type != ICMPV6_PKT_TOOBIG &&
	    type != NDISC_REDIRECT)
		return 0;

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET6);
	if (!x)
		return 0;

	if (type == NDISC_REDIRECT)
		ip6_redirect(skb, net, skb->dev->ifindex, 0,
			     sock_net_uid(net, NULL));
	else
		ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
	xfrm_state_put(x);

	return 0;
}

static void esp6_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

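/*
 * Non-AEAD configurations glue the cipher and the authentication algorithm
 * together through the authenc/authencesn template.  The key passed to
 * crypto_aead_setkey() is an rtattr-encoded blob, roughly:
 *
 *	[ CRYPTO_AUTHENC_KEYA_PARAM (enckeylen) ][ auth key ][ enc key ]
 */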
static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %u\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp6_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	switch (x->props.mode) {
	case XFRM_MODE_BEET:
		if (x->sel.family != AF_INET6)
			x->props.header_len += IPV4_BEET_PHMAXLEN +
					       (sizeof(struct ipv6hdr) - sizeof(struct iphdr));
		break;
	default:
	case XFRM_MODE_TRANSPORT:
		break;
	case XFRM_MODE_TUNNEL:
		x->props.header_len += sizeof(struct ipv6hdr);
		break;
	}

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
#ifdef CONFIG_INET6_ESPINTCP
		case TCP_ENCAP_ESPINTCP:
			/* only the length field, TCP encap is done by
			 * the socket
			 */
			x->props.header_len += 2;
			break;
#endif
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp6_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp6_type = {
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp6_init_state,
	.destructor	= esp6_destroy,
	.input		= esp6_input,
	.output		= esp6_output,
};

static struct xfrm6_protocol esp6_protocol = {
	.handler	= xfrm6_rcv,
	.input_handler	= xfrm_input,
	.cb_handler	= esp6_rcv_cb,
	.err_handler	= esp6_err,
	.priority	= 0,
};

static int __init esp6_init(void)
{
	if (xfrm_register_type(&esp6_type, AF_INET6) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm6_protocol_register(&esp6_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp6_type, AF_INET6);
		return -EAGAIN;
	}

	return 0;
}

static void __exit esp6_fini(void)
{
	if (xfrm6_protocol_deregister(&esp6_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	xfrm_unregister_type(&esp6_type, AF_INET6);
}

module_init(esp6_init);
module_exit(esp6_fini);

MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET6, XFRM_PROTO_ESP);