// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * xfrm_device.c - IPsec device offloading code.
 *
 * Copyright (c) 2015 secunet Security Networks AG
 *
 * Author:
 * Steffen Klassert <steffen.klassert@secunet.com>
 */

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/notifier.h>

#ifdef CONFIG_XFRM_OFFLOAD
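/* Transport mode: adjust the transport header for GSO segments and
 * pull the skb so that its data starts right behind the ESP header
 * (skb_transport_offset() plus props.header_len bytes in).
 */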
static void __xfrm_transport_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	skb_reset_mac_len(skb);
	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header -= x->props.header_len;

	pskb_pull(skb, skb_transport_offset(skb) + x->props.header_len);
}

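/* Tunnel mode: point the transport header behind the outer network
 * header of size @hsize for GSO segments, then pull the skb past the
 * MAC header and the state's header overhead (props.header_len).
 */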
static void __xfrm_mode_tunnel_prep(struct xfrm_state *x, struct sk_buff *skb,
				    unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	pskb_pull(skb, skb->mac_len + x->props.header_len);
}

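/* BEET mode: like tunnel mode, but reduce the amount pulled by the
 * length of the pseudo header that a non-IPv6 inner selector may
 * carry (adjusted for the IPv4/IPv6 header size difference when the
 * outer family is IPv6).
 */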
static void __xfrm_mode_beet_prep(struct xfrm_state *x, struct sk_buff *skb,
				  unsigned int hsize)
{
	struct xfrm_offload *xo = xfrm_offload(skb);
	int phlen = 0;

	if (xo->flags & XFRM_GSO_SEGMENT)
		skb->transport_header = skb->network_header + hsize;

	skb_reset_mac_len(skb);
	if (x->sel.family != AF_INET6) {
		phlen = IPV4_BEET_PHMAXLEN;
		if (x->outer_mode.family == AF_INET6)
			phlen += sizeof(struct ipv6hdr) - sizeof(struct iphdr);
	}

	pskb_pull(skb, skb->mac_len + hsize + (x->props.header_len - phlen));
}

/* Adjust pointers into the packet when IPsec is done at layer2 */
static void xfrm_outer_mode_prep(struct xfrm_state *x, struct sk_buff *skb)
{
	switch (x->outer_mode.encap) {
	case XFRM_MODE_TUNNEL:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_tunnel_prep(x, skb,
						       sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_TRANSPORT:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_transport_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_BEET:
		if (x->outer_mode.family == AF_INET)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct iphdr));
		if (x->outer_mode.family == AF_INET6)
			return __xfrm_mode_beet_prep(x, skb,
						     sizeof(struct ipv6hdr));
		break;
	case XFRM_MODE_ROUTEOPTIMIZATION:
	case XFRM_MODE_IN_TRIGGER:
		break;
	}
}

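/* Called from the core TX path (validate_xmit_skb()) for packets
 * carrying an offloaded xfrm state.  Packets the hardware can handle
 * pass through unchanged; if the per-CPU xfrm backlog is non-empty
 * the packet is deferred via *again, otherwise the ESP transform is
 * performed in software, segmenting GSO packets first if they were
 * rerouted away from the offloading device.
 */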
struct sk_buff *validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features, bool *again)
{
	int err;
	unsigned long flags;
	struct xfrm_state *x;
	struct softnet_data *sd;
	struct sk_buff *skb2, *nskb, *pskb = NULL;
	netdev_features_t esp_features = features;
	struct xfrm_offload *xo = xfrm_offload(skb);
	struct net_device *dev = skb->dev;
	struct sec_path *sp;

	if (!xo || (xo->flags & XFRM_XMIT))
		return skb;

	if (!(features & NETIF_F_HW_ESP))
		esp_features = features & ~(NETIF_F_SG | NETIF_F_CSUM_MASK);

	sp = skb_sec_path(skb);
	x = sp->xvec[sp->len - 1];
	if (xo->flags & XFRM_GRO || x->xso.flags & XFRM_OFFLOAD_INBOUND)
		return skb;

	/* This skb was already validated on the upper/virtual dev */
	if ((x->xso.dev != dev) && (x->xso.real_dev == dev))
		return skb;

	local_irq_save(flags);
	sd = this_cpu_ptr(&softnet_data);
	err = !skb_queue_empty(&sd->xfrm_backlog);
	local_irq_restore(flags);

	if (err) {
		*again = true;
		return skb;
	}

	if (skb_is_gso(skb) && unlikely(x->xso.dev != dev)) {
		struct sk_buff *segs;

		/* Packet got rerouted, fixup features and segment it. */
		esp_features = esp_features & ~(NETIF_F_HW_ESP | NETIF_F_GSO_ESP);

		segs = skb_gso_segment(skb, esp_features);
		if (IS_ERR(segs)) {
			kfree_skb(skb);
			atomic_long_inc(&dev->tx_dropped);
			return NULL;
		} else {
			consume_skb(skb);
			skb = segs;
		}
	}

	if (!skb->next) {
		esp_features |= skb->dev->gso_partial_features;
		xfrm_outer_mode_prep(x, skb);

		xo->flags |= XFRM_DEV_RESUME;

		err = x->type_offload->xmit(x, skb, esp_features);
		if (err) {
			if (err == -EINPROGRESS)
				return NULL;

			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			kfree_skb(skb);
			return NULL;
		}

		skb_push(skb, skb->data - skb_mac_header(skb));

		return skb;
	}

	skb_list_walk_safe(skb, skb2, nskb) {
		esp_features |= skb->dev->gso_partial_features;
		skb_mark_not_on_list(skb2);

		xo = xfrm_offload(skb2);
		xo->flags |= XFRM_DEV_RESUME;

		xfrm_outer_mode_prep(x, skb2);

		err = x->type_offload->xmit(x, skb2, esp_features);
		if (!err) {
			skb2->next = nskb;
		} else if (err != -EINPROGRESS) {
			XFRM_INC_STATS(xs_net(x), LINUX_MIB_XFRMOUTSTATEPROTOERROR);
			skb2->next = nskb;
			kfree_skb_list(skb2);
			return NULL;
		} else {
			if (skb == skb2)
				skb = nskb;
			else
				pskb->next = nskb;

			continue;
		}

		skb_push(skb2, skb2->data - skb_mac_header(skb2));
		pskb = skb2;
	}

	return skb;
}
EXPORT_SYMBOL_GPL(validate_xmit_xfrm);

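/* Set up device offload for a new xfrm state: resolve the target
 * netdevice from the user-supplied ifindex (or, failing that, from a
 * route lookup on the state's addresses), check that it provides the
 * xfrmdev_ops this state needs, and hand the state to the driver.
 */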
int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
		       struct xfrm_user_offload *xuo)
{
	int err;
	struct dst_entry *dst;
	struct net_device *dev;
	struct xfrm_state_offload *xso = &x->xso;
	xfrm_address_t *saddr;
	xfrm_address_t *daddr;

	if (!x->type_offload)
		return -EINVAL;

	/* We don't yet support UDP encapsulation and TFC padding. */
	if (x->encap || x->tfcpad)
		return -EINVAL;

	dev = dev_get_by_index(net, xuo->ifindex);
	if (!dev) {
		if (!(xuo->flags & XFRM_OFFLOAD_INBOUND)) {
			saddr = &x->props.saddr;
			daddr = &x->id.daddr;
		} else {
			saddr = &x->id.daddr;
			daddr = &x->props.saddr;
		}

		dst = __xfrm_dst_lookup(net, 0, 0, saddr, daddr,
					x->props.family,
					xfrm_smark_get(0, x));
		if (IS_ERR(dst))
			return 0;

		dev = dst->dev;

		dev_hold(dev);
		dst_release(dst);
	}

	if (!dev->xfrmdev_ops || !dev->xfrmdev_ops->xdo_dev_state_add) {
		xso->dev = NULL;
		dev_put(dev);
		return 0;
	}

	if (x->props.flags & XFRM_STATE_ESN &&
	    !dev->xfrmdev_ops->xdo_dev_state_advance_esn) {
		xso->dev = NULL;
		dev_put(dev);
		return -EINVAL;
	}

	xso->dev = dev;
	netdev_tracker_alloc(dev, &xso->dev_tracker, GFP_ATOMIC);
	xso->real_dev = dev;
	xso->num_exthdrs = 1;
	xso->flags = xuo->flags;

	err = dev->xfrmdev_ops->xdo_dev_state_add(x);
	if (err) {
		xso->num_exthdrs = 0;
		xso->flags = 0;
		xso->dev = NULL;
		xso->real_dev = NULL;
		dev_put_track(dev, &xso->dev_tracker);

		if (err != -EOPNOTSUPP)
			return err;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(xfrm_dev_state_add);

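/* Check whether a packet can be sent through the device offload
 * path: the route must not leave through a different device than the
 * offloading one, there must be no nested xfrm dst, and the packet
 * must fit the state's MTU (or be a GSO packet that segments to it).
 * The driver gets a final veto via xdo_dev_offload_ok().
 */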
bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	int mtu;
	struct dst_entry *dst = skb_dst(skb);
	struct xfrm_dst *xdst = (struct xfrm_dst *)dst;
	struct net_device *dev = x->xso.dev;

	if (!x->type_offload || x->encap)
		return false;

	if ((!dev || (dev == xfrm_dst_path(dst)->dev)) &&
	    (!xdst->child->xfrm)) {
		mtu = xfrm_state_mtu(x, xdst->child_mtu_cached);
		if (skb->len <= mtu)
			goto ok;

		if (skb_is_gso(skb) && skb_gso_validate_network_len(skb, mtu))
			goto ok;
	}

	return false;

ok:
	if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_offload_ok)
		return x->xso.dev->xfrmdev_ops->xdo_dev_offload_ok(skb, x);

	return true;
}
EXPORT_SYMBOL_GPL(xfrm_dev_offload_ok);

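/* Resume transmission of an skb after an asynchronous operation (the
 * XFRM_DEV_RESUME path): try to send it on its device right away
 * and, if the TX queue is frozen or the xmit did not complete, park
 * it on the per-CPU xfrm backlog and kick the TX softirq.
 */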
void xfrm_dev_resume(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret = NETDEV_TX_BUSY;
	struct netdev_queue *txq;
	struct softnet_data *sd;
	unsigned long flags;

	rcu_read_lock();
	txq = netdev_core_pick_tx(dev, skb, NULL);

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_xmit_frozen_or_stopped(txq))
		skb = dev_hard_start_xmit(skb, dev, txq, &ret);
	HARD_TX_UNLOCK(dev, txq);

	if (!dev_xmit_complete(ret)) {
		local_irq_save(flags);
		sd = this_cpu_ptr(&softnet_data);
		skb_queue_tail(&sd->xfrm_backlog, skb);
		raise_softirq_irqoff(NET_TX_SOFTIRQ);
		local_irq_restore(flags);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_dev_resume);

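/* Run from the NET_TX softirq: splice the per-CPU xfrm backlog onto
 * a private list and retry transmission of each queued skb.
 */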
void xfrm_dev_backlog(struct softnet_data *sd)
{
	struct sk_buff_head *xfrm_backlog = &sd->xfrm_backlog;
	struct sk_buff_head list;
	struct sk_buff *skb;

	if (skb_queue_empty(xfrm_backlog))
		return;

	__skb_queue_head_init(&list);

	spin_lock(&xfrm_backlog->lock);
	skb_queue_splice_init(xfrm_backlog, &list);
	spin_unlock(&xfrm_backlog->lock);

	while (!skb_queue_empty(&list)) {
		skb = __skb_dequeue(&list);
		xfrm_dev_resume(skb);
	}
}
#endif

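/* Sanity-check a device's advertised ESP offload features against
 * the xfrmdev_ops it provides; with CONFIG_XFRM_OFFLOAD disabled any
 * ESP offload feature is rejected.
 */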
static int xfrm_api_check(struct net_device *dev)
{
#ifdef CONFIG_XFRM_OFFLOAD
	if ((dev->features & NETIF_F_HW_ESP_TX_CSUM) &&
	    !(dev->features & NETIF_F_HW_ESP))
		return NOTIFY_BAD;

	if ((dev->features & NETIF_F_HW_ESP) &&
	    (!(dev->xfrmdev_ops &&
	       dev->xfrmdev_ops->xdo_dev_state_add &&
	       dev->xfrmdev_ops->xdo_dev_state_delete)))
		return NOTIFY_BAD;
#else
	if (dev->features & (NETIF_F_HW_ESP | NETIF_F_HW_ESP_TX_CSUM))
		return NOTIFY_BAD;
#endif

	return NOTIFY_DONE;
}

static int xfrm_dev_register(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_feat_change(struct net_device *dev)
{
	return xfrm_api_check(dev);
}

static int xfrm_dev_down(struct net_device *dev)
{
	if (dev->features & NETIF_F_HW_ESP)
		xfrm_dev_state_flush(dev_net(dev), dev, true);

	return NOTIFY_DONE;
}

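/* Netdevice notifier: validate offload capabilities on register and
 * feature change, and flush offloaded states when a device goes down
 * or unregisters.
 */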
static int xfrm_dev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	switch (event) {
	case NETDEV_REGISTER:
		return xfrm_dev_register(dev);

	case NETDEV_FEAT_CHANGE:
		return xfrm_dev_feat_change(dev);

	case NETDEV_DOWN:
	case NETDEV_UNREGISTER:
		return xfrm_dev_down(dev);
	}
	return NOTIFY_DONE;
}

static struct notifier_block xfrm_dev_notifier = {
	.notifier_call = xfrm_dev_event,
};

void __init xfrm_dev_init(void)
{
	register_netdevice_notifier(&xfrm_dev_notifier);
}