// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV6 GSO/GRO offload support
 *	Linux INET6 implementation
 *
 *	TCPv6 GSO/GRO support
 */
#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>
#include "ip6_offload.h"

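/* GRO receive handler for TCP over IPv6.  Validate the TCP checksum
 * against the IPv6 pseudo-header (unless the packet is already flagged
 * for flushing); on checksum failure flag it for flushing, otherwise let
 * the generic tcp_gro_receive() try to coalesce the segment.
 */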
INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp6_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      ip6_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

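/* GRO completion handler.  Re-seed th->check with the pseudo-header
 * checksum for the coalesced packet's new length, mark the skb as
 * SKB_GSO_TCPV6 so it can be resegmented later, and finish the common
 * work in tcp_gro_complete().
 */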
INDIRECT_CALLABLE_SCOPE int tcp6_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
				  &iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;

	return tcp_gro_complete(skb);
}

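/* GSO segmentation handler.  Reject skbs that are not marked
 * SKB_GSO_TCPV6, make sure the TCP header is in the linear data area,
 * restore the CHECKSUM_PARTIAL setup if the stack did not provide it,
 * and let the protocol-independent tcp_gso_segment() do the splitting.
 */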
static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	struct tcphdr *th;

	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(*th)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct ipv6hdr *ipv6h = ipv6_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the pseudo header; the stack is normally
		 * expected to have done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v6_send_check(skb, &ipv6h->saddr, &ipv6h->daddr);
	}

	return tcp_gso_segment(skb, features);
}
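
/* Offload callbacks for TCP over IPv6, registered for IPPROTO_TCP by
 * tcpv6_offload_init() below.
 */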
static const struct net_offload tcpv6_offload = {
	.callbacks = {
		.gso_segment	=	tcp6_gso_segment,
		.gro_receive	=	tcp6_gro_receive,
		.gro_complete	=	tcp6_gro_complete,
	},
};

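/* Hook the TCPv6 offload callbacks into the IPv6 protocol offload table. */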
int __init tcpv6_offload_init(void)
{
	return inet6_add_offload(&tcpv6_offload, IPPROTO_TCP);
}