// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/tcp.h>
#include <net/protocol.h>

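/* Propagate a software TX timestamp request from the original GSO skb
 * to the one segment whose sequence range [seq, seq + mss) contains
 * ts_seq, the sequence number carrying the timestamp request.
 */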
static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
			   unsigned int seq, unsigned int mss)
{
	while (skb) {
		if (before(ts_seq, seq + mss)) {
			skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
			skb_shinfo(skb)->tskey = ts_seq;
			return;
		}

		skb = skb->next;
		seq += mss;
	}
}

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
					netdev_features_t features)
{
	if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
		return ERR_PTR(-EINVAL);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		return ERR_PTR(-EINVAL);

	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
		const struct iphdr *iph = ip_hdr(skb);
		struct tcphdr *th = tcp_hdr(skb);

		/* Set up the checksum pseudo-header; normally the stack
		 * has done this already.
		 */

		th->check = 0;
		skb->ip_summed = CHECKSUM_PARTIAL;
		__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	}

	return tcp_gso_segment(skb, features);
}

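/* Software fallback for TCP segmentation: split a GSO skb into
 * MSS-sized segments and fix up the sequence numbers, flag bits and
 * checksums that skb_segment() merely duplicated from the original
 * header.
 */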
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	unsigned int sum_truesize = 0;
	struct tcphdr *th;
	unsigned int thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int mss;
	struct sk_buff *gso_skb = skb;
	__sum16 newcheck;
	bool ooo_okay, copy_destructor;

	th = tcp_hdr(skb);
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

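	/* The TCP checksum includes the pseudo-header, whose length
	 * field is about to change for every segment.  Stash the
	 * ones'-complement of the old value so the checksum can be
	 * patched incrementally (RFC 1624: HC' = ~(~HC + ~m + m'))
	 * rather than recomputed over each payload.
	 */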
	oldlen = (u16)~skb->len;
	__skb_pull(skb, thlen);

	mss = skb_shinfo(skb)->gso_size;
	if (unlikely(skb->len <= mss))
		goto out;

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */

		skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

		segs = NULL;
		goto out;
	}

	copy_destructor = gso_skb->destructor == tcp_wfree;
	ooo_okay = gso_skb->ooo_okay;
	/* All segments but the first should have ooo_okay cleared */
	skb->ooo_okay = 0;

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	/* Only first segment might have ooo_okay set */
	segs->ooo_okay = ooo_okay;

	/* GSO partial and frag_list segmentation only requires splitting
	 * the frame into an MSS multiple and possibly a remainder, both
	 * cases return a GSO skb. So update the mss now.
	 */
	if (skb_is_gso(segs))
		mss *= skb_shinfo(segs)->gso_segs;

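	/* RFC 1624 again: delta is ~m + m' for the pseudo-header length
	 * change.  oldlen already holds ~(old TCP length), and every
	 * full segment now carries thlen + mss bytes.
	 */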
	delta = htonl(oldlen + (thlen + mss));

	skb = segs;
	th = tcp_hdr(skb);
	seq = ntohl(th->seq);

	if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
		tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

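	/* th->check is expected to hold only the pseudo-header sum here
	 * (the CHECKSUM_PARTIAL convention), so folding delta into it
	 * yields a value that is valid for every full-sized segment and
	 * can simply be stamped on each one in the loop below.
	 */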
	newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
					       (__force u32)delta));

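	/* Every segment but the last carries exactly mss bytes: clear
	 * FIN/PSH on all but the final segment, install the precomputed
	 * checksum, and rewrite the sequence numbers.  CWR may only
	 * survive on the first segment.
	 */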
	while (skb->next) {
		th->fin = th->psh = 0;
		th->check = newcheck;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			gso_reset_checksum(skb, ~th->check);
		else
			th->check = gso_make_checksum(skb, ~th->check);

		seq += mss;
		if (copy_destructor) {
			skb->destructor = gso_skb->destructor;
			skb->sk = gso_skb->sk;
			sum_truesize += skb->truesize;
		}
		skb = skb->next;
		th = tcp_hdr(skb);

		th->seq = htonl(seq);
		th->cwr = 0;
	}

	/* The following permits TCP Small Queues to work well with GSO:
	 * the callback to the TCP stack is invoked when the last frag is
	 * freed at TX completion, not right now while gso_skb is freed
	 * by the GSO engine.
	 */
	if (copy_destructor) {
		int delta;

		swap(gso_skb->sk, skb->sk);
		swap(gso_skb->destructor, skb->destructor);
		sum_truesize += skb->truesize;
		delta = sum_truesize - gso_skb->truesize;
		/* In some pathological cases, delta can be negative:
		 * we need to either use refcount_add() or
		 * refcount_sub_and_test().
		 */
		if (likely(delta >= 0))
			refcount_add(delta, &skb->sk->sk_wmem_alloc);
		else
			WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
	}

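	/* The trailing segment may be shorter than mss, so newcheck does
	 * not apply to it: redo the RFC 1624 update using this segment's
	 * actual length from the transport header to the tail.
	 */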
	delta = htonl(oldlen + (skb_tail_pointer(skb) -
				skb_transport_header(skb)) +
		      skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		gso_reset_checksum(skb, ~th->check);
	else
		th->check = gso_make_checksum(skb, ~th->check);
out:
	return segs;
}

struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	struct sk_buff *pp = NULL;
	struct sk_buff *p;
	struct tcphdr *th;
	struct tcphdr *th2;
	unsigned int len;
	unsigned int thlen;
	__be32 flags;
	unsigned int mss = 1;
	unsigned int hlen;
	unsigned int off;
	int flush = 1;
	int i;

	off = skb_gro_offset(skb);
	hlen = off + sizeof(*th);
	th = skb_gro_header_fast(skb, off);
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	hlen = off + thlen;
	if (skb_gro_header_hard(skb, hlen)) {
		th = skb_gro_header_slow(skb, hlen, off);
		if (unlikely(!th))
			goto out;
	}

	skb_gro_pull(skb, thlen);

	len = skb_gro_len(skb);
	flags = tcp_flag_word(th);

	list_for_each_entry(p, head, list) {
		if (!NAPI_GRO_CB(p)->same_flow)
			continue;

		th2 = tcp_hdr(p);

		if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
			NAPI_GRO_CB(p)->same_flow = 0;
			continue;
		}

		goto found;
	}
	p = NULL;
	goto out_check_final;

found:
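	/* From here on, "flush" collects reasons why skb must not be
	 * merged into p: any header difference ORed in below leaves it
	 * non-zero, which both blocks the merge and gets p flushed to
	 * the stack.
	 */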
	/* Include the IP ID check below from the innermost IP hdr */
	flush = NAPI_GRO_CB(p)->flush;
	flush |= (__force int)(flags & TCP_FLAG_CWR);
	flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
		  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
	flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
	for (i = sizeof(*th); i < thlen; i += 4)
		flush |= *(u32 *)((u8 *)th + i) ^
			 *(u32 *)((u8 *)th2 + i);

	/* When we receive our second frame we can make a decision on
	 * whether we continue this flow as an atomic flow with a fixed
	 * ID or we use an incrementing ID.
	 */
	if (NAPI_GRO_CB(p)->flush_id != 1 ||
	    NAPI_GRO_CB(p)->count != 1 ||
	    !NAPI_GRO_CB(p)->is_atomic)
		flush |= NAPI_GRO_CB(p)->flush_id;
	else
		NAPI_GRO_CB(p)->is_atomic = false;

	mss = skb_shinfo(p)->gso_size;

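	/* A segment longer than p's gso_size cannot be merged without
	 * corrupting the train's MSS, and th->seq must line up exactly
	 * with the end of p's payload; anything else means loss or
	 * reordering.
	 */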
	flush |= (len - 1) >= mss;
	flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
	flush |= p->decrypted ^ skb->decrypted;
#endif

	if (flush || skb_gro_receive(p, skb)) {
		mss = 1;
		goto out_check_final;
	}

	tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
	flush = len < mss;
	flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
					TCP_FLAG_RST | TCP_FLAG_SYN |
					TCP_FLAG_FIN));

	if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
		pp = p;

out:
	NAPI_GRO_CB(skb)->flush |= (flush != 0);

	return pp;
}

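/* Finish a merged GRO super-packet before it is handed to the stack:
 * point csum_start/csum_offset at the TCP header, mark the skb
 * CHECKSUM_PARTIAL, and restore GSO metadata (segment count, ECN type)
 * so the packet can be re-segmented on the way out if necessary.
 */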
int tcp_gro_complete(struct sk_buff *skb)
{
	struct tcphdr *th = tcp_hdr(skb);

	skb->csum_start = (unsigned char *)th - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;

	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

	if (th->cwr)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

	if (skb->encapsulation)
		skb->inner_transport_header = skb->transport_header;

	return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
	/* Don't bother verifying checksum if we're going to flush anyway. */
	if (!NAPI_GRO_CB(skb)->flush &&
	    skb_gro_checksum_validate(skb, IPPROTO_TCP,
				      inet_gro_compute_pseudo)) {
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;
	}

	return tcp_gro_receive(head, skb);
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

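	/* Seed th->check with the IPv4 pseudo-header sum; this pairs
	 * with the CHECKSUM_PARTIAL state set up in tcp_gro_complete().
	 */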
	th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
				  iph->daddr, 0);
	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

	if (NAPI_GRO_CB(skb)->is_atomic)
		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

	return tcp_gro_complete(skb);
}

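/* Hook into the inet offload machinery: inet_gso_segment() and the
 * inet GRO handlers dispatch to these callbacks for IPPROTO_TCP.
 */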
static const struct net_offload tcpv4_offload = {
	.callbacks = {
		.gso_segment	=	tcp4_gso_segment,
		.gro_receive	=	tcp4_gro_receive,
		.gro_complete	=	tcp4_gro_complete,
	},
};

int __init tcpv4_offload_init(void)
{
	return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}