// SPDX-License-Identifier: GPL-2.0-only
/*
 * IPV4 GSO/GRO offload support
 * Linux INET implementation
 *
 * Copyright (C) 2016 secunet Security Networks AG
 * Author: Steffen Klassert <steffen.klassert@secunet.com>
 *
 * ESP GRO support
 */

#include <linux/skbuff.h>
#include <linux/init.h>
#include <net/protocol.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/udp.h>

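/* GRO receive handler for ESP: parse the SPI and sequence number,
 * attach the matching xfrm state to the secpath unless the NIC has
 * already done the crypto, then hand the packet to xfrm_input().
 * xfrm_input() consumes the skb, so ERR_PTR(-EINPROGRESS) tells the
 * GRO layer the packet is gone; on any failure the skb is restored
 * and flushed to the regular receive path.
 */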
static struct sk_buff *esp4_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	int offset = skb_gro_offset(skb);
	struct xfrm_offload *xo;
	struct xfrm_state *x;
	__be32 seq;
	__be32 spi;
	int err;

	if (!pskb_pull(skb, offset))
		return NULL;

	if ((err = xfrm_parse_spi(skb, IPPROTO_ESP, &spi, &seq)) != 0)
		goto out;

	xo = xfrm_offload(skb);
	if (!xo || !(xo->flags & CRYPTO_DONE)) {
		struct sec_path *sp = secpath_set(skb);

		if (!sp)
			goto out;

		if (sp->len == XFRM_MAX_DEPTH)
			goto out_reset;

		x = xfrm_state_lookup(dev_net(skb->dev), skb->mark,
				      (xfrm_address_t *)&ip_hdr(skb)->daddr,
				      spi, IPPROTO_ESP, AF_INET);
		if (!x)
			goto out_reset;

		skb->mark = xfrm_smark_get(skb->mark, x);

		sp->xvec[sp->len++] = x;
		sp->olen++;

		xo = xfrm_offload(skb);
		if (!xo)
			goto out_reset;
	}

	xo->flags |= XFRM_GRO;

	XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
	XFRM_SPI_SKB_CB(skb)->family = AF_INET;
	XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
	XFRM_SPI_SKB_CB(skb)->seq = seq;

	/* We don't need to handle errors from xfrm_input; it does all
	 * the error handling and frees the resources on error.
	 */
	xfrm_input(skb, IPPROTO_ESP, spi, -2);

	return ERR_PTR(-EINPROGRESS);
out_reset:
	secpath_reset(skb);
out:
	skb_push(skb, offset);
	NAPI_GRO_CB(skb)->same_flow = 0;
	NAPI_GRO_CB(skb)->flush = 1;

	return NULL;
}

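/* Populate the outer ESP header for GSO output: write the SPI and the
 * low 32 bits of the output sequence number and record the inner
 * protocol for the later segmentation step.  The write through
 * skb_mac_header() updates the protocol field of the outer IP header,
 * at which the xfrm output path has pointed the mac header.
 */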
static void esp4_gso_encap(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct iphdr *iph = ip_hdr(skb);
	struct xfrm_offload *xo = xfrm_offload(skb);
	int proto = iph->protocol;

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	esph->spi = x->id.spi;
	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	xo->proto = proto;
}

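/* Tunnel mode: the payload is a complete IP packet, so push the MAC
 * header back on and let the generic MAC layer GSO code segment it.
 */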
static struct sk_buff *xfrm4_tunnel_gso_segment(struct xfrm_state *x,
						struct sk_buff *skb,
						netdev_features_t features)
{
	__skb_push(skb, skb->mac_len);
	return skb_mac_gso_segment(skb, features);
}

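/* Transport mode: step over the ESP header and IV and segment with
 * the gso_segment callback of the inner transport protocol.
 */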
static struct sk_buff *xfrm4_transport_gso_segment(struct xfrm_state *x,
						   struct sk_buff *skb,
						   netdev_features_t features)
{
	const struct net_offload *ops;
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb->transport_header += x->props.header_len;
	ops = rcu_dereference(inet_offloads[xo->proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

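/* BEET mode: find the real transport header behind the BEET pseudo
 * header (or behind the IPv6 extension header chain when the inner
 * family is IPv6) before segmenting with the inner protocol's
 * gso_segment callback.
 */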
static struct sk_buff *xfrm4_beet_gso_segment(struct xfrm_state *x,
					      struct sk_buff *skb,
					      netdev_features_t features)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	const struct net_offload *ops;
	u8 proto = xo->proto;

	skb->transport_header += x->props.header_len;

	if (x->sel.family != AF_INET6) {
		if (proto == IPPROTO_BEETPH) {
			struct ip_beet_phdr *ph =
				(struct ip_beet_phdr *)skb->data;

			skb->transport_header += ph->hdrlen * 8;
			proto = ph->nexthdr;
		} else {
			skb->transport_header -= IPV4_BEET_PHMAXLEN;
		}
	} else {
		__be16 frag;

		skb->transport_header +=
			ipv6_skip_exthdr(skb, 0, &proto, &frag);
		if (proto == IPPROTO_TCP)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
	}

	__skb_pull(skb, skb_transport_offset(skb));
	ops = rcu_dereference(inet_offloads[proto]);
	if (likely(ops && ops->callbacks.gso_segment))
		segs = ops->callbacks.gso_segment(skb, features);

	return segs;
}

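/* Dispatch segmentation according to the outer mode of the state. */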
static struct sk_buff *xfrm4_outer_mode_gso_segment(struct xfrm_state *x,
						    struct sk_buff *skb,
						    netdev_features_t features)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		return xfrm4_tunnel_gso_segment(x, skb, features);
	case XFRM_MODE_TRANSPORT:
		return xfrm4_transport_gso_segment(x, skb, features);
	case XFRM_MODE_BEET:
		return xfrm4_beet_gso_segment(x, skb, features);
	}

	return ERR_PTR(-EOPNOTSUPP);
}

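/* GSO callback for ESP: validate the packet against the offloaded
 * state, strip the ESP header and IV, and mask out the checksum and
 * scatter-gather features the device cannot provide for this packet
 * before segmenting by outer mode.
 */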
static struct sk_buff *esp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct xfrm_state *x;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct sec_path *sp;

	if (!xo)
		return ERR_PTR(-EINVAL);

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_ESP))
		return ERR_PTR(-EINVAL);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	aead = x->data;
	esph = ip_esp_hdr(skb);

	if (esph->spi != x->id.spi)
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead)))
		return ERR_PTR(-EINVAL);

	__skb_pull(skb, sizeof(*esph) + crypto_aead_ivsize(aead));

	skb->encap_hdr_csum = 1;

	if ((!(skb->dev->gso_partial_features & NETIF_F_HW_ESP) &&
	     !(features & NETIF_F_HW_ESP)) || x->xso.dev != skb->dev)
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);
	else if (!(features & NETIF_F_HW_ESP_TX_CSUM) &&
		 !(skb->dev->gso_partial_features & NETIF_F_HW_ESP_TX_CSUM))
		esp_features = features & ~NETIF_F_CSUM_MASK;

	xo->flags |= XFRM_GSO_SEGMENT;

	return xfrm4_outer_mode_gso_segment(x, skb, esp_features);
}

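/* Tail of the GRO/offload receive path: make sure the ESP header and
 * IV are in the linear area, then finish decapsulation in the common
 * esp_input_done2() path.
 */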
static int esp_input_tail(struct xfrm_state *x, struct sk_buff *skb)
{
	struct crypto_aead *aead = x->data;
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (!pskb_may_pull(skb, sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead)))
		return -EINVAL;

	if (!(xo->flags & CRYPTO_DONE))
		skb->ip_summed = CHECKSUM_NONE;

	return esp_input_done2(skb, 0);
}

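/* Transmit path for offload-capable states.  When the underlying
 * device can do the ESP transformation itself, only the ESP header
 * and the IPv4 header are prepared here; otherwise CRYPTO_FALLBACK
 * is set and the packet is encrypted in software via
 * esp_output_head()/esp_output_tail().
 */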
static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features)
{
	int err;
	int alen;
	int blksize;
	struct xfrm_offload *xo;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct esp_info esp;
	bool hw_offload = true;
	__u32 seq;

	esp.inplace = true;

	xo = xfrm_offload(skb);

	if (!xo)
		return -EINVAL;

	if ((!(features & NETIF_F_HW_ESP) &&
	     !(skb->dev->gso_partial_features & NETIF_F_HW_ESP)) ||
	    x->xso.dev != skb->dev) {
		xo->flags |= CRYPTO_FALLBACK;
		hw_offload = false;
	}

	esp.proto = xo->proto;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);

	esp.tfclen = 0;
	/* XXX: Add support for tfc padding here. */

	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	esp.clen = ALIGN(skb->len + 2 + esp.tfclen, blksize);
	esp.plen = esp.clen - skb->len - esp.tfclen;
	esp.tailen = esp.tfclen + esp.plen + alen;

	esp.esph = ip_esp_hdr(skb);

	if (!hw_offload || !skb_is_gso(skb)) {
		esp.nfrags = esp_output_head(x, skb, &esp);
		if (esp.nfrags < 0)
			return esp.nfrags;
	}

	seq = xo->seq.low;

	esph = esp.esph;
	esph->spi = x->id.spi;

	skb_push(skb, -skb_network_offset(skb));

	if (xo->flags & XFRM_GSO_SEGMENT) {
		esph->seq_no = htonl(seq);

		if (!skb_is_gso(skb))
			xo->seq.low++;
		else
			xo->seq.low += skb_shinfo(skb)->gso_segs;
	}

	esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32));

	ip_hdr(skb)->tot_len = htons(skb->len);
	ip_send_check(ip_hdr(skb));

	if (hw_offload)
		return 0;

	err = esp_output_tail(x, skb, &esp);
	if (err)
		return err;

	secpath_reset(skb);

	return 0;
}

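/* Glue: hook ESP GRO/GSO into the inet offload table and register the
 * ESP offload callbacks with the xfrm layer.
 */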
static const struct net_offload esp4_offload = {
	.callbacks = {
		.gro_receive = esp4_gro_receive,
		.gso_segment = esp4_gso_segment,
	},
};

static const struct xfrm_type_offload esp_type_offload = {
	.description = "ESP4 OFFLOAD",
	.owner = THIS_MODULE,
	.proto = IPPROTO_ESP,
	.input_tail = esp_input_tail,
	.xmit = esp_xmit,
	.encap = esp4_gso_encap,
};

static int __init esp4_offload_init(void)
{
	if (xfrm_register_type_offload(&esp_type_offload, AF_INET) < 0) {
		pr_info("%s: can't add xfrm type offload\n", __func__);
		return -EAGAIN;
	}

	return inet_add_offload(&esp4_offload, IPPROTO_ESP);
}

static void __exit esp4_offload_exit(void)
{
	xfrm_unregister_type_offload(&esp_type_offload, AF_INET);
	inet_del_offload(&esp4_offload, IPPROTO_ESP);
}

module_init(esp4_offload_init);
module_exit(esp4_offload_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
MODULE_ALIAS_XFRM_OFFLOAD_TYPE(AF_INET, XFRM_PROTO_ESP);
MODULE_DESCRIPTION("IPV4 GSO/GRO offload support");