Thomas Gleixner | 2874c5f | 2019-05-27 08:55:01 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0-or-later |
Vlad Yasevich | 5edbb07 | 2012-11-15 08:49:18 +0000 | [diff] [blame] | 2 | /* |
| 3 | * IPV6 GSO/GRO offload support |
| 4 | * Linux INET6 implementation |
| 5 | * |
Vlad Yasevich | 5edbb07 | 2012-11-15 08:49:18 +0000 | [diff] [blame] | 6 | * UDPv6 GSO support |
| 7 | */ |
| 8 | #include <linux/skbuff.h> |
Tom Herbert | 57c67ff | 2014-08-22 13:34:44 -0700 | [diff] [blame] | 9 | #include <linux/netdevice.h> |
Paolo Abeni | 028e0a4 | 2018-12-14 11:51:59 +0100 | [diff] [blame] | 10 | #include <linux/indirect_call_wrapper.h> |
Vlad Yasevich | 5edbb07 | 2012-11-15 08:49:18 +0000 | [diff] [blame] | 11 | #include <net/protocol.h> |
| 12 | #include <net/ipv6.h> |
| 13 | #include <net/udp.h> |
Vlad Yasevich | d4d0d35 | 2012-11-15 16:35:37 +0000 | [diff] [blame] | 14 | #include <net/ip6_checksum.h> |
Vlad Yasevich | 5edbb07 | 2012-11-15 08:49:18 +0000 | [diff] [blame] | 15 | #include "ip6_offload.h" |
Eric Dumazet | 4721031 | 2021-11-15 09:05:51 -0800 | [diff] [blame] | 16 | #include <net/gro.h> |
Vlad Yasevich | 5edbb07 | 2012-11-15 08:49:18 +0000 | [diff] [blame] | 17 | |
/* Segment a UDP/IPv6 GSO superframe in software (UFO).
 *
 * Dispatch:
 *  - encapsulated skbs marked SKB_GSO_UDP_TUNNEL{,_CSUM} are handed to
 *    skb_udp_tunnel_segment();
 *  - SKB_GSO_UDP_L4 skbs are handed to __udp_gso_segment();
 *  - plain SKB_GSO_UDP skbs get classic UFO: the UDP checksum is computed
 *    in software over the whole superframe, an IPv6 fragment header is
 *    inserted after the unfragmentable part, and skb_segment() splits the
 *    skb into IP fragments.
 *
 * Returns the segment list on success, or an ERR_PTR() on failure
 * (-EINVAL by default).
 */
static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
					 netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int mss;
	unsigned int unfrag_ip6hlen, unfrag_len;
	struct frag_hdr *fptr;
	u8 *packet_start, *prevhdr;
	u8 nexthdr;
	u8 frag_hdr_sz = sizeof(struct frag_hdr);
	__wsum csum;
	int tnl_hlen;
	int err;

	if (skb->encapsulation && skb_shinfo(skb)->gso_type &
	    (SKB_GSO_UDP_TUNNEL|SKB_GSO_UDP_TUNNEL_CSUM))
		segs = skb_udp_tunnel_segment(skb, features, true);
	else {
		const struct ipv6hdr *ipv6h;
		struct udphdr *uh;

		/* Only plain UFO or UDP L4 GSO types are valid here. */
		if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_UDP | SKB_GSO_UDP_L4)))
			goto out;

		/* The UDP header must be in the linear area below. */
		if (!pskb_may_pull(skb, sizeof(struct udphdr)))
			goto out;

		if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
			return __udp_gso_segment(skb, features, true);

		/* Nothing to split if the payload already fits in one MSS. */
		mss = skb_shinfo(skb)->gso_size;
		if (unlikely(skb->len <= mss))
			goto out;

		/* Do software UFO. Complete and fill in the UDP checksum as HW cannot
		 * do checksum of UDP packets sent as multiple IP fragments.
		 */

		uh = udp_hdr(skb);
		ipv6h = ipv6_hdr(skb);

		uh->check = 0;
		csum = skb_checksum(skb, 0, skb->len, 0);
		uh->check = udp_v6_check(skb->len, &ipv6h->saddr,
					 &ipv6h->daddr, csum);
		/* Zero means "no checksum" on the wire; substitute the
		 * all-ones equivalent instead.
		 */
		if (uh->check == 0)
			uh->check = CSUM_MANGLED_0;

		skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* If there is no outer header we can fake a checksum offload
		 * due to the fact that we have already done the checksum in
		 * software prior to segmenting the frame.
		 */
		if (!skb->encap_hdr_csum)
			features |= NETIF_F_HW_CSUM;

		/* Check if there is enough headroom to insert fragment header. */
		tnl_hlen = skb_tnl_header_len(skb);
		if (skb->mac_header < (tnl_hlen + frag_hdr_sz)) {
			if (gso_pskb_expand_head(skb, tnl_hlen + frag_hdr_sz))
				goto out;
		}

		/* Find the unfragmentable header and shift it left by frag_hdr_sz
		 * bytes to insert fragment header.
		 */
		err = ip6_find_1stfragopt(skb, &prevhdr);
		if (err < 0)
			return ERR_PTR(err);
		unfrag_ip6hlen = err;
		nexthdr = *prevhdr;
		*prevhdr = NEXTHDR_FRAGMENT;
		unfrag_len = (skb_network_header(skb) - skb_mac_header(skb)) +
			     unfrag_ip6hlen + tnl_hlen;
		packet_start = (u8 *) skb->head + SKB_GSO_CB(skb)->mac_offset;
		memmove(packet_start-frag_hdr_sz, packet_start, unfrag_len);

		/* All header offsets shift down by the inserted header size. */
		SKB_GSO_CB(skb)->mac_offset -= frag_hdr_sz;
		skb->mac_header -= frag_hdr_sz;
		skb->network_header -= frag_hdr_sz;

		fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
		fptr->nexthdr = nexthdr;
		fptr->reserved = 0;
		fptr->identification = ipv6_proxy_select_ident(dev_net(skb->dev), skb);

		/* Fragment the skb. ipv6 header and the remaining fields of the
		 * fragment header are updated in ipv6_gso_segment()
		 */
		segs = skb_segment(skb, features);
	}

out:
	return segs;
}
Tom Herbert | 57c67ff | 2014-08-22 13:34:44 -0700 | [diff] [blame] | 114 | |
Alexander Lobakin | 55e7298 | 2020-11-11 20:45:38 +0000 | [diff] [blame] | 115 | static struct sock *udp6_gro_lookup_skb(struct sk_buff *skb, __be16 sport, |
| 116 | __be16 dport) |
| 117 | { |
| 118 | const struct ipv6hdr *iph = skb_gro_network_header(skb); |
| 119 | |
| 120 | return __udp6_lib_lookup(dev_net(skb->dev), &iph->saddr, sport, |
| 121 | &iph->daddr, dport, inet6_iif(skb), |
| 122 | inet6_sdif(skb), &udp_table, NULL); |
| 123 | } |
| 124 | |
/* GRO receive handler for UDPv6: validate (or defer) the checksum, then
 * let the generic udp_gro_receive() try to aggregate the packet.
 * Returns the same-flow list head from udp_gro_receive(), or NULL with
 * the flush flag set when the packet cannot be aggregated.
 */
INDIRECT_CALLABLE_SCOPE
struct sk_buff *udp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);
	struct sock *sk = NULL;
	struct sk_buff *pp;

	/* No accessible UDP header -> nothing we can aggregate. */
	if (unlikely(!uh))
		goto flush;

	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (NAPI_GRO_CB(skb)->flush)
		goto skip;

	/* A zero uh->check is valid for UDPv6 only in special cases; the
	 * zero-check variant handles that, flushing on a bad checksum.
	 */
	if (skb_gro_checksum_validate_zero_check(skb, IPPROTO_UDP, uh->check,
						 ip6_gro_compute_pseudo))
		goto flush;
	else if (uh->check)
		skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
					     ip6_gro_compute_pseudo);

skip:
	NAPI_GRO_CB(skb)->is_ipv6 = 1;

	/* Only pay for a socket lookup when some socket may have UDP
	 * encapsulation enabled (static key flipped on first encap socket).
	 */
	if (static_branch_unlikely(&udpv6_encap_needed_key))
		sk = udp6_gro_lookup_skb(skb, uh->source, uh->dest);

	pp = udp_gro_receive(head, skb, uh, sk);
	return pp;

flush:
	NAPI_GRO_CB(skb)->flush = 1;
	return NULL;
}
| 159 | |
/* Finalize an aggregated UDPv6 GRO packet before it is passed up the stack.
 *
 * Fraglist GRO packets are re-marked as SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4 so
 * they can be resegmented later; everything else gets its UDP checksum
 * rewritten to the pseudo-header complement for the merged length and is
 * completed by the generic udp_gro_complete().
 *
 * @nhoff: offset of the UDP header from skb->data.
 * Returns 0 on success (fraglist path) or udp_gro_complete()'s result.
 */
INDIRECT_CALLABLE_SCOPE int udp6_gro_complete(struct sk_buff *skb, int nhoff)
{
	const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	struct udphdr *uh = (struct udphdr *)(skb->data + nhoff);

	/* do fraglist only if there is no outer UDP encap (or we already processed it) */
	if (NAPI_GRO_CB(skb)->is_flist && !NAPI_GRO_CB(skb)->encap_mark) {
		/* Patch the UDP length to cover the whole merged payload. */
		uh->len = htons(skb->len - nhoff);

		skb_shinfo(skb)->gso_type |= (SKB_GSO_FRAGLIST|SKB_GSO_UDP_L4);
		skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

		/* Track checksum validity: bump csum_level if already
		 * verified, otherwise mark this (outer) level as verified.
		 */
		if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			if (skb->csum_level < SKB_MAX_CSUM_LEVEL)
				skb->csum_level++;
		} else {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = 0;
		}

		return 0;
	}

	/* Non-zero checksum: store the complemented pseudo-header sum for
	 * the new total length; the payload part is filled in later.
	 */
	if (uh->check)
		uh->check = ~udp_v6_check(skb->len - nhoff, &ipv6h->saddr,
					  &ipv6h->daddr, 0);

	return udp_gro_complete(skb, nhoff, udp6_lib_lookup_skb);
}
| 189 | |
/* Offload callbacks registered for IPPROTO_UDP over IPv6. */
static const struct net_offload udpv6_offload = {
	.callbacks = {
		.gso_segment	=	udp6_ufo_fragment,
		.gro_receive	=	udp6_gro_receive,
		.gro_complete	=	udp6_gro_complete,
	},
};
| 197 | |
/* Register the UDPv6 GSO/GRO offload handlers with the inet6 stack.
 * Returns 0 on success or a negative errno from inet6_add_offload().
 */
int udpv6_offload_init(void)
{
	return inet6_add_offload(&udpv6_offload, IPPROTO_UDP);
}
Tom Herbert | a602456 | 2016-04-05 08:22:51 -0700 | [diff] [blame] | 202 | |
| 203 | int udpv6_offload_exit(void) |
| 204 | { |
| 205 | return inet6_del_offload(&udpv6_offload, IPPROTO_UDP); |
| 206 | } |