// SPDX-License-Identifier: GPL-2.0-only
#define pr_fmt(fmt) "IPsec: " fmt

#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>

#include <linux/highmem.h>

struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

struct esp_output_extra {
	__be32 seqhi;
	u32 esphoff;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);

/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
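/*
 * A sketch of the resulting blob (exact offsets depend on the
 * algorithm's alignment masks), with ESN enabled:
 *
 *   [esp_output_extra][IV][struct aead_request + ctx][sg[nfrags]]
 *
 * The esp_tmp_extra()/esp_tmp_iv()/esp_tmp_req()/esp_req_sg() helpers
 * below recompute these offsets from the same alignment rules.
 */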
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int extralen)
{
	unsigned int len;

	len = extralen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	return kmalloc(len, GFP_ATOMIC);
}

static inline void *esp_tmp_extra(void *tmp)
{
	return PTR_ALIGN(tmp, __alignof__(struct esp_output_extra));
}

static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int extralen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + extralen,
			 crypto_aead_alignmask(aead) + 1) : tmp + extralen;
}

static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}

static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
{
	struct esp_output_extra *extra = esp_tmp_extra(tmp);
	struct crypto_aead *aead = x->data;
	int extralen = 0;
	u8 *iv;
	struct aead_request *req;
	struct scatterlist *sg;

	if (x->props.flags & XFRM_STATE_ESN)
		extralen += sizeof(*extra);

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);

	/* Unref skb_frag_pages in the src scatterlist if necessary.
	 * Skip the first sg which comes from skb->data.
	 */
	if (req->src != req->dst)
		for (sg = sg_next(req->src); sg; sg = sg_next(sg))
			put_page(sg_page(sg));
}

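/* Completion callback for asynchronous encryption: free the tmp buffer
 * (dropping the extra page references taken for a split source/destination
 * scatterlist) and resume the packet, either on the crypto-offload device
 * path or on the normal xfrm output path.
 */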
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;
	struct xfrm_offload *xo = xfrm_offload(skb);
	void *tmp;
	struct xfrm_state *x;

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		struct sec_path *sp = skb_sec_path(skb);

		x = sp->xvec[sp->len - 1];
	} else {
		x = skb_dst(skb)->xfrm;
	}

	tmp = ESP_SKB_CB(skb)->tmp;
	esp_ssg_unref(x, tmp);
	kfree(tmp);

	if (xo && (xo->flags & XFRM_DEV_RESUME)) {
		if (err) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));
		secpath_reset(skb);
		xfrm_dev_resume(skb);
	} else {
		xfrm_output_resume(skb, err);
	}
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_extra(tmp);

	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

static void esp_output_restore_header(struct sk_buff *skb)
{
	void *tmp = ESP_SKB_CB(skb)->tmp;
	struct esp_output_extra *extra = esp_tmp_extra(tmp);

	esp_restore_header(skb, skb_transport_offset(skb) + extra->esphoff -
			   sizeof(__be32));
}

static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
					       struct xfrm_state *x,
					       struct ip_esp_hdr *esph,
					       struct esp_output_extra *extra)
{
	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		__u32 seqhi;
		struct xfrm_offload *xo = xfrm_offload(skb);

		if (xo)
			seqhi = xo->seq.hi;
		else
			seqhi = XFRM_SKB_CB(skb)->seq.output.hi;

		extra->esphoff = (unsigned char *)esph -
				 skb_transport_header(skb);
		esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
		extra->seqhi = esph->spi;
		esph->seq_no = htonl(seqhi);
	}

	esph->spi = x->id.spi;

	return esph;
}

static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}

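/* Build the ESP trailer per RFC 4303: optional TFC padding (zeros),
 * self-describing pad bytes 1, 2, 3, ..., then the pad-length byte and
 * the next-header byte. For example, plen == 4 and proto == IPPROTO_TCP
 * yield the tail bytes 01 02 02 06.
 */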
static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
{
	/* Fill padding... */
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = proto;
}

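/* Prepend the NAT-T UDP encapsulation header in front of the ESP header:
 * a plain UDP header for UDP_ENCAP_ESPINUDP, or a UDP header followed by
 * an 8-byte zero non-ESP marker for UDP_ENCAP_ESPINUDP_NON_IKE. The UDP
 * checksum is left at zero, which is permitted for UDP over IPv4.
 */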
static int esp_output_udp_encap(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	int encap_type;
	struct udphdr *uh;
	__be32 *udpdata32;
	__be16 sport, dport;
	struct xfrm_encap_tmpl *encap = x->encap;
	struct ip_esp_hdr *esph = esp->esph;
	unsigned int len;

	spin_lock_bh(&x->lock);
	sport = encap->encap_sport;
	dport = encap->encap_dport;
	encap_type = encap->encap_type;
	spin_unlock_bh(&x->lock);

	len = skb->len + esp->tailen - skb_transport_offset(skb);
	if (len + sizeof(struct iphdr) >= IP_MAX_MTU)
		return -EMSGSIZE;

	uh = (struct udphdr *)esph;
	uh->source = sport;
	uh->dest = dport;
	uh->len = htons(len);
	uh->check = 0;

	switch (encap_type) {
	default:
	case UDP_ENCAP_ESPINUDP:
		esph = (struct ip_esp_hdr *)(uh + 1);
		break;
	case UDP_ENCAP_ESPINUDP_NON_IKE:
		udpdata32 = (__be32 *)(uh + 1);
		udpdata32[0] = udpdata32[1] = 0;
		esph = (struct ip_esp_hdr *)(udpdata32 + 2);
		break;
	}

	*skb_mac_header(skb) = IPPROTO_UDP;
	esp->esph = esph;

	return 0;
}

int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *tail;
	u8 *vaddr;
	int nfrags;
	int esph_offset;
	struct page *page;
	struct sk_buff *trailer;
	int tailen = esp->tailen;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		int err = esp_output_udp_encap(x, skb, esp);

		if (err < 0)
			return err;
	}

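	/* Three ways to append the trailer: reuse tailroom of an
	 * unshared skb; attach it as a fresh page fragment so nothing
	 * is copied (encryption then runs out-of-place into new pages);
	 * or fall back to copy-on-write via skb_cow_data().
	 */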
	if (!skb_cloned(skb)) {
		if (tailen <= skb_tailroom(skb)) {
			nfrags = 1;
			trailer = skb;
			tail = skb_tail_pointer(trailer);

			goto skip_cow;
		} else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
			   && !skb_has_frag_list(skb)) {
			int allocsize;
			struct sock *sk = skb->sk;
			struct page_frag *pfrag = &x->xfrag;

			esp->inplace = false;

			allocsize = ALIGN(tailen, L1_CACHE_BYTES);

			spin_lock_bh(&x->lock);

			if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
				spin_unlock_bh(&x->lock);
				goto cow;
			}

			page = pfrag->page;
			get_page(page);

			vaddr = kmap_atomic(page);

			tail = vaddr + pfrag->offset;

			esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);

			kunmap_atomic(vaddr);

			nfrags = skb_shinfo(skb)->nr_frags;

			__skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
					     tailen);
			skb_shinfo(skb)->nr_frags = ++nfrags;

			pfrag->offset = pfrag->offset + allocsize;

			spin_unlock_bh(&x->lock);

			nfrags++;

			skb->len += tailen;
			skb->data_len += tailen;
			skb->truesize += tailen;
			if (sk && sk_fullsock(sk))
				refcount_add(tailen, &sk->sk_wmem_alloc);

			goto out;
		}
	}

cow:
	esph_offset = (unsigned char *)esp->esph - skb_transport_header(skb);

	nfrags = skb_cow_data(skb, tailen, &trailer);
	if (nfrags < 0)
		goto out;
	tail = skb_tail_pointer(trailer);
	esp->esph = (struct ip_esp_hdr *)(skb_transport_header(skb) + esph_offset);

skip_cow:
	esp_output_fill_trailer(tail, esp->tfclen, esp->plen, esp->proto);
	pskb_put(skb, trailer, tailen);

out:
	return nfrags;
}
EXPORT_SYMBOL_GPL(esp_output_head);

int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp)
{
	u8 *iv;
	int alen;
	void *tmp;
	int ivlen;
	int assoclen;
	int extralen;
	struct page *page;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg, *dsg;
	struct esp_output_extra *extra;
	int err = -ENOMEM;

	assoclen = sizeof(struct ip_esp_hdr);
	extralen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		extralen += sizeof(*extra);
		assoclen += sizeof(__be32);
	}

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	tmp = esp_alloc_tmp(aead, esp->nfrags + 2, extralen);
	if (!tmp)
		goto error;

	extra = esp_tmp_extra(tmp);
	iv = esp_tmp_iv(aead, tmp, extralen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

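	/* If the trailer was placed on a separate page frag by
	 * esp_output_head(), encrypt into a distinct destination
	 * scatterlist; otherwise encrypt in place.
	 */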
	if (esp->inplace)
		dsg = sg;
	else
		dsg = &sg[esp->nfrags];

	esph = esp_output_set_extra(skb, x, esp->esph, extra);
	esp->esph = esph;

	sg_init_table(sg, esp->nfrags);
	err = skb_to_sgvec(skb, sg,
			   (unsigned char *)esph - skb->data,
			   assoclen + ivlen + esp->clen + alen);
	if (unlikely(err < 0))
		goto error_free;

	if (!esp->inplace) {
		int allocsize;
		struct page_frag *pfrag = &x->xfrag;

		allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);

		spin_lock_bh(&x->lock);
		if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
			spin_unlock_bh(&x->lock);
			goto error_free;
		}

		skb_shinfo(skb)->nr_frags = 1;

		page = pfrag->page;
		get_page(page);
		/* replace page frags in skb with new page */
		__skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
		pfrag->offset = pfrag->offset + allocsize;
		spin_unlock_bh(&x->lock);

		sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
		err = skb_to_sgvec(skb, dsg,
				   (unsigned char *)esph - skb->data,
				   assoclen + ivlen + esp->clen + alen);
		if (unlikely(err < 0))
			goto error_free;
	}

	if ((x->props.flags & XFRM_STATE_ESN))
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	else
		aead_request_set_callback(req, 0, esp_output_done, skb);

	aead_request_set_crypt(req, sg, dsg, ivlen + esp->clen, iv);
	aead_request_set_ad(req, assoclen);

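	/* The sequence number doubles as the per-packet IV material:
	 * zero the IV, then copy up to eight low-order bytes of the
	 * big-endian 64-bit sequence number into its tail.
	 */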
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&esp->seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		goto error;

	case -ENOSPC:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	if (sg != dsg)
		esp_ssg_unref(x, tmp);

error_free:
	kfree(tmp);
error:
	return err;
}
EXPORT_SYMBOL_GPL(esp_output_tail);

static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int alen;
	int blksize;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;

	esp.inplace = true;

	esp.proto = *skb_mac_header(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			esp.tfclen = padto - skb->len;
	}
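	/* clen is the to-be-encrypted length: payload plus TFC pad,
	 * plus padding and the two trailer bytes (pad length, next
	 * header), rounded up to the cipher block size (at least 4,
	 * per the 32-bit alignment rule of RFC 4303). plen covers
	 * the padding and trailer bytes; tailen adds the ICV.
	 */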
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	esp.nfrags = esp_output_head(x, skb, &esp);
	if (esp.nfrags < 0)
		return esp.nfrags;

	esph = esp.esph;
	esph->spi = x->id.spi;

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
	esp.seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
				((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	skb_push(skb, -skb_network_offset(skb));

	return esp_output_tail(x, skb, &esp);
}

static inline int esp_remove_trailer(struct sk_buff *skb)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int alen, hlen, elen;
	int padlen, trimlen;
	__wsum csumdiff;
	u8 nexthdr[2];
	int ret;

	alen = crypto_aead_authsize(aead);
	hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	elen = skb->len - hlen;

	if (xo && (xo->flags & XFRM_ESP_NO_TRAILER)) {
		ret = xo->proto;
		goto out;
	}

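	/* Peek at the last two bytes in front of the ICV: the pad-length
	 * and next-header fields of the ESP trailer.
	 */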
	if (skb_copy_bits(skb, skb->len - alen - 2, nexthdr, 2))
		BUG();

	ret = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen) {
		net_dbg_ratelimited("ipsec esp packet is garbage padlen=%d, elen=%d\n",
				    padlen + 2, elen - alen);
		goto out;
	}

	trimlen = alen + padlen + 2;
	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		csumdiff = skb_checksum(skb, skb->len - trimlen, trimlen, 0);
		skb->csum = csum_block_sub(skb->csum, csumdiff,
					   skb->len - trimlen);
	}
	pskb_trim(skb, skb->len - trimlen);

	ret = nexthdr[1];

out:
	return ret;
}

int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct crypto_aead *aead = x->data;
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int ihl;

	if (!xo || (xo && !(xo->flags & CRYPTO_DONE)))
		kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	err = esp_remove_trailer(skb);
	if (unlikely(err < 0))
		goto out;

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertise the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	skb_pull_rcsum(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
EXPORT_SYMBOL_GPL(esp_input_done2);

static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
{
	struct xfrm_state *x = xfrm_input_state(skb);
	struct ip_esp_hdr *esph;

	/* For ESN we move the header forward by 4 bytes to
	 * accommodate the high bits. We will move it back after
	 * decryption.
	 */
677 if ((x->props.flags & XFRM_STATE_ESN)) {
Johannes Bergd58ff352017-06-16 14:29:23 +0200678 esph = skb_push(skb, 4);
Steffen Klassertcac26612017-01-17 10:22:57 +0100679 *seqhi = esph->spi;
680 esph->spi = esph->seq_no;
681 esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
682 }
683}
684
Herbert Xu7021b2e2015-05-27 16:03:46 +0800685static void esp_input_done_esn(struct crypto_async_request *base, int err)
686{
687 struct sk_buff *skb = base->data;
688
689 esp_input_restore_header(skb);
690 esp_input_done(base, err);
691}
692
Herbert Xu38320c72008-01-28 19:35:05 -0800693/*
694 * Note: detecting truncated vs. non-truncated authentication data is very
695 * expensive, so we only support truncated data, which is the recommended
696 * and common case.
697 */
698static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
699{
Mathias Krause1c5ad132013-10-18 12:09:05 +0200700 struct crypto_aead *aead = x->data;
Herbert Xu38320c72008-01-28 19:35:05 -0800701 struct aead_request *req;
702 struct sk_buff *trailer;
Herbert Xu7021b2e2015-05-27 16:03:46 +0800703 int ivlen = crypto_aead_ivsize(aead);
Haishuang Yan0c05f982018-08-17 15:51:00 +0800704 int elen = skb->len - sizeof(struct ip_esp_hdr) - ivlen;
Herbert Xu38320c72008-01-28 19:35:05 -0800705 int nfrags;
Steffen Klassert0dc49e92011-03-08 00:07:14 +0000706 int assoclen;
Steffen Klassert0dc49e92011-03-08 00:07:14 +0000707 int seqhilen;
708 __be32 *seqhi;
Herbert Xu38320c72008-01-28 19:35:05 -0800709 void *tmp;
710 u8 *iv;
711 struct scatterlist *sg;
Herbert Xu38320c72008-01-28 19:35:05 -0800712 int err = -EINVAL;
713
Haishuang Yan0c05f982018-08-17 15:51:00 +0800714 if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + ivlen))
Herbert Xu38320c72008-01-28 19:35:05 -0800715 goto out;
716
717 if (elen <= 0)
718 goto out;
719
Haishuang Yan0c05f982018-08-17 15:51:00 +0800720 assoclen = sizeof(struct ip_esp_hdr);
Steffen Klassert0dc49e92011-03-08 00:07:14 +0000721 seqhilen = 0;
722
723 if (x->props.flags & XFRM_STATE_ESN) {
Steffen Klassert0dc49e92011-03-08 00:07:14 +0000724 seqhilen += sizeof(__be32);
725 assoclen += seqhilen;
726 }
727
Steffen Klassertcac26612017-01-17 10:22:57 +0100728 if (!skb_cloned(skb)) {
729 if (!skb_is_nonlinear(skb)) {
730 nfrags = 1;
731
732 goto skip_cow;
733 } else if (!skb_has_frag_list(skb)) {
734 nfrags = skb_shinfo(skb)->nr_frags;
735 nfrags++;
736
737 goto skip_cow;
738 }
739 }
740
741 err = skb_cow_data(skb, 0, &trailer);
742 if (err < 0)
743 goto out;
744
745 nfrags = err;
746
747skip_cow:
Herbert Xu38320c72008-01-28 19:35:05 -0800748 err = -ENOMEM;
Herbert Xu7021b2e2015-05-27 16:03:46 +0800749 tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
Herbert Xu38320c72008-01-28 19:35:05 -0800750 if (!tmp)
751 goto out;
752
753 ESP_SKB_CB(skb)->tmp = tmp;
Herbert Xu962fcef2016-06-18 13:03:36 +0800754 seqhi = esp_tmp_extra(tmp);
Steffen Klassert0dc49e92011-03-08 00:07:14 +0000755 iv = esp_tmp_iv(aead, tmp, seqhilen);
Herbert Xu38320c72008-01-28 19:35:05 -0800756 req = esp_tmp_req(aead, iv);
Herbert Xu7021b2e2015-05-27 16:03:46 +0800757 sg = esp_req_sg(aead, req);
Herbert Xu38320c72008-01-28 19:35:05 -0800758
Steffen Klassertcac26612017-01-17 10:22:57 +0100759 esp_input_set_header(skb, seqhi);
Herbert Xu38320c72008-01-28 19:35:05 -0800760
761 sg_init_table(sg, nfrags);
Jason A. Donenfeld3f297702017-06-04 04:16:23 +0200762 err = skb_to_sgvec(skb, sg, 0, skb->len);
Steffen Klasserte6194922017-07-13 09:13:30 +0200763 if (unlikely(err < 0)) {
764 kfree(tmp);
Jason A. Donenfeld3f297702017-06-04 04:16:23 +0200765 goto out;
Steffen Klasserte6194922017-07-13 09:13:30 +0200766 }
Steffen Klassert0dc49e92011-03-08 00:07:14 +0000767
Steffen Klassertcac26612017-01-17 10:22:57 +0100768 skb->ip_summed = CHECKSUM_NONE;
769
770 if ((x->props.flags & XFRM_STATE_ESN))
771 aead_request_set_callback(req, 0, esp_input_done_esn, skb);
772 else
773 aead_request_set_callback(req, 0, esp_input_done, skb);
774
Herbert Xu7021b2e2015-05-27 16:03:46 +0800775 aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
776 aead_request_set_ad(req, assoclen);
Herbert Xu38320c72008-01-28 19:35:05 -0800777
778 err = crypto_aead_decrypt(req);
779 if (err == -EINPROGRESS)
780 goto out;
781
Herbert Xu7021b2e2015-05-27 16:03:46 +0800782 if ((x->props.flags & XFRM_STATE_ESN))
783 esp_input_restore_header(skb);
784
Herbert Xu38320c72008-01-28 19:35:05 -0800785 err = esp_input_done2(skb, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700786
787out:
Herbert Xu668dc8a2007-12-16 15:55:02 -0800788 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789}
790
Patrick McHardyc5c25232007-04-09 11:47:18 -0700791static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700792{
Mathias Krause1c5ad132013-10-18 12:09:05 +0200793 struct crypto_aead *aead = x->data;
794 u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
Benjamin Poirier91657ea2012-05-24 11:32:38 +0000795 unsigned int net_adj;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796
Diego Beltrami0a694522006-10-03 23:47:05 -0700797 switch (x->props.mode) {
Benjamin Poirier91657ea2012-05-24 11:32:38 +0000798 case XFRM_MODE_TRANSPORT:
799 case XFRM_MODE_BEET:
800 net_adj = sizeof(struct iphdr);
801 break;
Diego Beltrami0a694522006-10-03 23:47:05 -0700802 case XFRM_MODE_TUNNEL:
Benjamin Poirier91657ea2012-05-24 11:32:38 +0000803 net_adj = 0;
Diego Beltrami0a694522006-10-03 23:47:05 -0700804 break;
805 default:
Benjamin Poirier91657ea2012-05-24 11:32:38 +0000806 BUG();
Linus Torvalds1da177e2005-04-16 15:20:36 -0700807 }
Diego Beltrami0a694522006-10-03 23:47:05 -0700808
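	/* Strip our own overhead (header and ICV) from the MTU, round
	 * down to a cipher-block boundary, then re-add the inner-header
	 * adjustment and reserve the two mandatory trailer bytes.
	 */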
	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}

static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

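	/* Only fragmentation-needed and redirect ICMP errors are acted
	 * on here; everything else is left to the generic handlers.
	 */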
	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, IPPROTO_ESP);
	else
		ipv4_redirect(skb, net, 0, IPPROTO_ESP);
	xfrm_state_put(x);

	return 0;
}

static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (!aead)
		return;

	crypto_free_aead(aead);
}

static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}

static int esp_init_authenc(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	char *key;
	char *p;
	char authenc_name[CRYPTO_MAX_ALG_NAME];
	unsigned int keylen;
	int err;

	err = -EINVAL;
	if (!x->ealg)
		goto error;

	err = -ENAMETOOLONG;

	if ((x->props.flags & XFRM_STATE_ESN)) {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthencesn(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	} else {
		if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
			     "%s%sauthenc(%s,%s)%s",
			     x->geniv ?: "", x->geniv ? "(" : "",
			     x->aalg ? x->aalg->alg_name : "digest_null",
			     x->ealg->alg_name,
			     x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
			goto error;
	}

	aead = crypto_alloc_aead(authenc_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
		 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
	err = -ENOMEM;
	key = kmalloc(keylen, GFP_KERNEL);
	if (!key)
		goto error;

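	/* Lay out the authenc(...) key blob: an rtattr parameter block
	 * carrying the encryption key length, followed by the raw
	 * authentication key and then the raw encryption key, matching
	 * the format expected by crypto/authenc.c.
	 */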
	p = key;
	rta = (void *)p;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	p += RTA_SPACE(sizeof(*param));

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;

		memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
		p += (x->aalg->alg_key_len + 7) / 8;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		err = -EINVAL;
		if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
		    crypto_aead_authsize(aead)) {
			pr_info("ESP: %s digestsize %u != %hu\n",
				x->aalg->alg_name,
				crypto_aead_authsize(aead),
				aalg_desc->uinfo.auth.icv_fullbits / 8);
			goto free_key;
		}

		err = crypto_aead_setauthsize(
			aead, x->aalg->alg_trunc_len / 8);
		if (err)
			goto free_key;
	}

	param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
	memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

	err = crypto_aead_setkey(aead, key, keylen);

free_key:
	kfree(key);

error:
	return err;
}

static int esp_init_state(struct xfrm_state *x)
{
	struct crypto_aead *aead;
	u32 align;
	int err;

	x->data = NULL;

	if (x->aead)
		err = esp_init_aead(x);
	else
		err = esp_init_authenc(x);

	if (err)
		goto error;

	aead = x->data;

	x->props.header_len = sizeof(struct ip_esp_hdr) +
			      crypto_aead_ivsize(aead);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			err = -EINVAL;
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}

	align = ALIGN(crypto_aead_blocksize(aead), 4);
	x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
	return err;
}

static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
	return 0;
}

static const struct xfrm_type esp_type =
{
	.description	= "ESP4",
	.owner		= THIS_MODULE,
	.proto		= IPPROTO_ESP,
	.flags		= XFRM_TYPE_REPLAY_PROT,
	.init_state	= esp_init_state,
	.destructor	= esp_destroy,
	.get_mtu	= esp4_get_mtu,
	.input		= esp_input,
	.output		= esp_output,
};

static struct xfrm4_protocol esp4_protocol = {
	.handler	=	xfrm4_rcv,
	.input_handler	=	xfrm_input,
	.cb_handler	=	esp4_rcv_cb,
	.err_handler	=	esp4_err,
	.priority	=	0,
};

static int __init esp4_init(void)
{
	if (xfrm_register_type(&esp_type, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type\n", __func__);
		return -EAGAIN;
	}
	if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
		pr_info("%s: can't add protocol\n", __func__);
		xfrm_unregister_type(&esp_type, AF_INET);
		return -EAGAIN;
	}
	return 0;
}

static void __exit esp4_fini(void)
{
	if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
		pr_info("%s: can't remove protocol\n", __func__);
	if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
		pr_info("%s: can't remove xfrm type\n", __func__);
}

module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);