// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	TCPv6 GSO/GRO support
 */
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"

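/* GRO receive handler for TCP over IPv6: validate the TCP checksum
 * against the IPv6 pseudo-header unless the packet is already marked
 * for flushing, then let the generic tcp_gro_receive() try to coalesce
 * the segment with packets already held on the GRO list.
 */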
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      ip6_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

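/* GRO complete handler: the merged packet leaves GRO looking like a
 * locally generated GSO packet, so seed th->check with the IPv6
 * pseudo-header checksum (as for checksum offload), mark the skb as
 * SKB_GSO_TCPV6 and let tcp_gro_complete() fill in the remaining
 * checksum/GSO metadata.
 */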
INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
				  &iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

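/* GSO segmentation for TCP over IPv6: sanity-check the gso_type, make
 * sure the TCP header is in the linear area, and, if the stack has not
 * already requested checksum offload, rebuild the pseudo-header
 * checksum before handing the skb to the generic tcp_gso_segment().
 */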
static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct tcphdr *th;

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*th)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up pseudo header, usually expect stack to have done
		 * this.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	}

	return tcp_gso_segment(skb, features);
}

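/* Offload callbacks registered below for IPPROTO_TCP in the IPv6 offload table. */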
static const struct net_offload tcpv6_offload = {
	.callbacks = {
		.gso_segment	=	tcp6_gso_segment,
		.gro_receive	=	tcp6_gro_receive,
		.gro_complete	=	tcp6_gro_complete,
	},
};

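/* Register the TCPv6 GSO/GRO callbacks for IPPROTO_TCP with the inet6
 * offload layer; invoked during IPv6 offload setup (see ip6_offload.h).
 */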
int __init tcpv6_offload_init(void)
{
	return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
}