// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPV4 GSO/GRO offload support
 *	Linux INET implementation
 *
 *	TCPv4 GSO/GRO support
 */

#include <linux/indirect_call_wrapper.h>
#include <linux/skbuff.h>
#include <net/gro.h>
#include <net/tcp.h>
#include <net/protocol.h>

static void tcp_gso_tstamp(struct sk_buff *skb, unsigned int ts_seq,
                           unsigned int seq, unsigned int mss)
{
        while (skb) {
                if (before(ts_seq, seq + mss)) {
                        skb_shinfo(skb)->tx_flags |= SKBTX_SW_TSTAMP;
                        skb_shinfo(skb)->tskey = ts_seq;
                        return;
                }

                skb = skb->next;
                seq += mss;
        }
}
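
/* A minimal userspace sketch (not kernel code) of the walk above: given
 * the timestamp key ts_seq carried over from the original skb, the first
 * segment's sequence number and the MSS, it returns the index of the
 * segment whose sequence range covers ts_seq, i.e. the one that would be
 * tagged with SKBTX_SW_TSTAMP.  segment_for_tskey() is a hypothetical
 * helper name; seq_before() mirrors the kernel's wrap-safe before().
 *
 *	#include <stdint.h>
 *
 *	static int seq_before(uint32_t seq1, uint32_t seq2)
 *	{
 *		return (int32_t)(seq1 - seq2) < 0;
 *	}
 *
 *	static int segment_for_tskey(uint32_t ts_seq, uint32_t seq,
 *				     uint32_t mss, int nsegs)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nsegs; i++, seq += mss)
 *			if (seq_before(ts_seq, seq + mss))
 *				return i;	// this segment gets the tstamp
 *		return -1;			// key beyond the train: none tagged
 *	}
 */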

static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb,
                                        netdev_features_t features)
{
        if (!(skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4))
                return ERR_PTR(-EINVAL);

        if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
                return ERR_PTR(-EINVAL);

        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
                const struct iphdr *iph = ip_hdr(skb);
                struct tcphdr *th = tcp_hdr(skb);

                /* Set up the checksum pseudo header; we usually expect
                 * the stack to have done this already.
                 */

                th->check = 0;
                skb->ip_summed = CHECKSUM_PARTIAL;
                __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
        }

        return tcp_gso_segment(skb, features);
}
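
/* For reference, when the slow path above has to re-seed th->check, the
 * seed __tcp_v4_send_check() leaves there is the folded (not yet
 * inverted) ones-complement sum of the IPv4 pseudo header: saddr, daddr,
 * the protocol and the TCP length.  A minimal userspace sketch of that
 * seed, assuming <stdint.h> and all inputs already in host byte order
 * (tcp_v4_pseudo_seed() is a hypothetical helper, not a kernel API):
 *
 *	static uint16_t tcp_v4_pseudo_seed(uint32_t saddr, uint32_t daddr,
 *					   uint32_t tcp_len, uint8_t proto)
 *	{
 *		uint64_t sum = 0;
 *
 *		sum += (saddr >> 16) + (saddr & 0xffff);
 *		sum += (daddr >> 16) + (daddr & 0xffff);
 *		sum += proto + tcp_len;
 *		while (sum >> 16)			// fold carries back in
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)sum;			// inverted later, at completion
 *	}
 */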

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
                                netdev_features_t features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        unsigned int sum_truesize = 0;
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
        __be32 delta;
        unsigned int oldlen;
        unsigned int mss;
        struct sk_buff *gso_skb = skb;
        __sum16 newcheck;
        bool ooo_okay, copy_destructor;

        th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        if (!pskb_may_pull(skb, thlen))
                goto out;

        oldlen = (u16)~skb->len;
        __skb_pull(skb, thlen);

        mss = skb_shinfo(skb)->gso_size;
        if (unlikely(skb->len <= mss))
                goto out;

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */

                skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);

                segs = NULL;
                goto out;
        }

        copy_destructor = gso_skb->destructor == tcp_wfree;
        ooo_okay = gso_skb->ooo_okay;
        /* All segments but the first should have ooo_okay cleared */
        skb->ooo_okay = 0;

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;

        /* Only the first segment might have ooo_okay set */
        segs->ooo_okay = ooo_okay;

        /* GSO partial and frag_list segmentation only requires splitting
         * the frame into an MSS multiple and possibly a remainder; both
         * cases return a GSO skb.  So update the mss now.
         */
        if (skb_is_gso(segs))
                mss *= skb_shinfo(segs)->gso_segs;

        delta = htonl(oldlen + (thlen + mss));

        skb = segs;
        th = tcp_hdr(skb);
        seq = ntohl(th->seq);

        if (unlikely(skb_shinfo(gso_skb)->tx_flags & SKBTX_SW_TSTAMP))
                tcp_gso_tstamp(segs, skb_shinfo(gso_skb)->tskey, seq, mss);

        newcheck = ~csum_fold((__force __wsum)((__force u32)th->check +
                                               (__force u32)delta));

        while (skb->next) {
                th->fin = th->psh = 0;
                th->check = newcheck;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        gso_reset_checksum(skb, ~th->check);
                else
                        th->check = gso_make_checksum(skb, ~th->check);

                seq += mss;
                if (copy_destructor) {
                        skb->destructor = gso_skb->destructor;
                        skb->sk = gso_skb->sk;
                        sum_truesize += skb->truesize;
                }
                skb = skb->next;
                th = tcp_hdr(skb);

                th->seq = htonl(seq);
                th->cwr = 0;
        }

        /* The following permits TCP Small Queues to work well with GSO:
         * the callback to the TCP stack will be called when the last frag
         * is freed at TX completion, not right now when gso_skb is freed
         * by the GSO engine.
         */
        if (copy_destructor) {
                int delta;

                swap(gso_skb->sk, skb->sk);
                swap(gso_skb->destructor, skb->destructor);
                sum_truesize += skb->truesize;
                delta = sum_truesize - gso_skb->truesize;
                /* In some pathological cases, delta can be negative.
                 * We need to either use refcount_add() or refcount_sub_and_test().
                 */
                if (likely(delta >= 0))
                        refcount_add(delta, &skb->sk->sk_wmem_alloc);
                else
                        WARN_ON_ONCE(refcount_sub_and_test(-delta, &skb->sk->sk_wmem_alloc));
        }

        delta = htonl(oldlen + (skb_tail_pointer(skb) -
                                skb_transport_header(skb)) +
                      skb->data_len);
        th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                                (__force u32)delta));
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                gso_reset_checksum(skb, ~th->check);
        else
                th->check = gso_make_checksum(skb, ~th->check);
out:
        return segs;
}
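
/* The newcheck/delta arithmetic above is in effect the incremental update
 * from RFC 1624: oldlen holds ~(original TCP length) and each segment's
 * new length is thlen + mss, so adding (~old + new) to the pseudo-header
 * seed in th->check re-targets it without recomputing anything else.  A
 * minimal userspace sketch of the identity, operating on a non-inverted
 * seed as this code does (csum_seed_update() is a hypothetical name):
 *
 *	#include <stdint.h>
 *
 *	// seed' = fold(seed + ~old_len + new_len), ones-complement
 *	static uint16_t csum_seed_update(uint16_t seed, uint16_t old_len,
 *					 uint16_t new_len)
 *	{
 *		uint32_t sum = seed;
 *
 *		sum += (uint16_t)~old_len;	// retract the old length
 *		sum += new_len;			// account for the new one
 *		sum = (sum & 0xffff) + (sum >> 16);
 *		sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)sum;
 *	}
 */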

struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        struct sk_buff *pp = NULL;
        struct sk_buff *p;
        struct tcphdr *th;
        struct tcphdr *th2;
        unsigned int len;
        unsigned int thlen;
        __be32 flags;
        unsigned int mss = 1;
        unsigned int hlen;
        unsigned int off;
        int flush = 1;
        int i;

        off = skb_gro_offset(skb);
        hlen = off + sizeof(*th);
        th = skb_gro_header_fast(skb, off);
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        hlen = off + thlen;
        if (skb_gro_header_hard(skb, hlen)) {
                th = skb_gro_header_slow(skb, hlen, off);
                if (unlikely(!th))
                        goto out;
        }

        skb_gro_pull(skb, thlen);

        len = skb_gro_len(skb);
        flags = tcp_flag_word(th);

        list_for_each_entry(p, head, list) {
                if (!NAPI_GRO_CB(p)->same_flow)
                        continue;

                th2 = tcp_hdr(p);

                if (*(u32 *)&th->source ^ *(u32 *)&th2->source) {
                        NAPI_GRO_CB(p)->same_flow = 0;
                        continue;
                }

                goto found;
        }
        p = NULL;
        goto out_check_final;

found:
        /* Include the IP ID check below from the innermost IP hdr */
        flush = NAPI_GRO_CB(p)->flush;
        flush |= (__force int)(flags & TCP_FLAG_CWR);
        flush |= (__force int)((flags ^ tcp_flag_word(th2)) &
                  ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH));
        flush |= (__force int)(th->ack_seq ^ th2->ack_seq);
        for (i = sizeof(*th); i < thlen; i += 4)
                flush |= *(u32 *)((u8 *)th + i) ^
                         *(u32 *)((u8 *)th2 + i);

        /* When we receive our second frame we can make a decision on
         * whether we continue this flow as an atomic flow with a fixed
         * ID or use an incrementing ID.
         */
        if (NAPI_GRO_CB(p)->flush_id != 1 ||
            NAPI_GRO_CB(p)->count != 1 ||
            !NAPI_GRO_CB(p)->is_atomic)
                flush |= NAPI_GRO_CB(p)->flush_id;
        else
                NAPI_GRO_CB(p)->is_atomic = false;

        mss = skb_shinfo(p)->gso_size;

        flush |= (len - 1) >= mss;
        flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq);
#ifdef CONFIG_TLS_DEVICE
        flush |= p->decrypted ^ skb->decrypted;
#endif

        if (flush || skb_gro_receive(p, skb)) {
                mss = 1;
                goto out_check_final;
        }

        tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH);

out_check_final:
        flush = len < mss;
        flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH |
                                        TCP_FLAG_RST | TCP_FLAG_SYN |
                                        TCP_FLAG_FIN));

        if (p && (!NAPI_GRO_CB(skb)->same_flow || flush))
                pp = p;

out:
        NAPI_GRO_CB(skb)->flush |= (flush != 0);

        return pp;
}
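
/* Most of the flush logic above uses a branch-free idiom: XOR fields
 * that must match and OR the result into an accumulator, so flush ends
 * up nonzero iff any compared word differs or any standalone flush
 * condition holds.  A minimal userspace sketch of the option-area
 * comparison, assuming <stdint.h> and <netinet/tcp.h> for struct tcphdr
 * (tcp_options_differ() is a hypothetical helper):
 *
 *	// nonzero iff the TCP option bytes of the two headers differ
 *	static uint32_t tcp_options_differ(const uint8_t *th,
 *					   const uint8_t *th2,
 *					   unsigned int thlen)
 *	{
 *		uint32_t diff = 0;
 *		unsigned int i;
 *
 *		for (i = sizeof(struct tcphdr); i < thlen; i += 4)
 *			diff |= *(const uint32_t *)(th + i) ^
 *				*(const uint32_t *)(th2 + i);
 *		return diff;
 *	}
 */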

int tcp_gro_complete(struct sk_buff *skb)
{
        struct tcphdr *th = tcp_hdr(skb);

        skb->csum_start = (unsigned char *)th - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
        skb->ip_summed = CHECKSUM_PARTIAL;

        skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;

        if (th->cwr)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

        if (skb->encapsulation)
                skb->inner_transport_header = skb->transport_header;

        return 0;
}
EXPORT_SYMBOL(tcp_gro_complete);
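
/* CHECKSUM_PARTIAL as set above tells the stack the checksum over the
 * bytes from csum_start onward is still outstanding and that the result
 * belongs at csum_start + csum_offset; the check field meanwhile holds
 * the pseudo-header seed, so summing the whole range folds it in for
 * free.  A rough userspace sketch of the software fallback (the kernel's
 * real helper is skb_checksum_help(); sw_csum_finish() and fold32() are
 * hypothetical names, assuming <stdint.h> and <stddef.h>):
 *
 *	static uint16_t fold32(uint32_t sum)
 *	{
 *		sum = (sum & 0xffff) + (sum >> 16);
 *		sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)sum;
 *	}
 *
 *	static void sw_csum_finish(uint8_t *pkt, size_t csum_start,
 *				   size_t csum_offset, size_t len)
 *	{
 *		uint32_t sum = 0;
 *		uint16_t check;
 *		size_t i;
 *
 *		for (i = csum_start; i + 1 < len; i += 2)	// 16-bit words
 *			sum += (uint32_t)pkt[i] << 8 | pkt[i + 1];
 *		if (i < len)					// odd trailing byte
 *			sum += (uint32_t)pkt[i] << 8;
 *		check = ~fold32(sum);
 *		pkt[csum_start + csum_offset] = check >> 8;
 *		pkt[csum_start + csum_offset + 1] = check & 0xff;
 *	}
 */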

INDIRECT_CALLABLE_SCOPE
struct sk_buff *tcp4_gro_receive(struct list_head *head, struct sk_buff *skb)
{
        /* Don't bother verifying checksum if we're going to flush anyway. */
        if (!NAPI_GRO_CB(skb)->flush &&
            skb_gro_checksum_validate(skb, IPPROTO_TCP,
                                      inet_gro_compute_pseudo)) {
                NAPI_GRO_CB(skb)->flush = 1;
                return NULL;
        }

        return tcp_gro_receive(head, skb);
}

INDIRECT_CALLABLE_SCOPE int tcp4_gro_complete(struct sk_buff *skb, int thoff)
{
        const struct iphdr *iph = ip_hdr(skb);
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);
        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;

        if (NAPI_GRO_CB(skb)->is_atomic)
                skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;

        return tcp_gro_complete(skb);
}

static const struct net_offload tcpv4_offload = {
        .callbacks = {
                .gso_segment = tcp4_gso_segment,
                .gro_receive = tcp4_gro_receive,
                .gro_complete = tcp4_gro_complete,
        },
};

int __init tcpv4_offload_init(void)
{
        return inet_add_offload(&tcpv4_offload, IPPROTO_TCP);
}
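
/* For context: inet_add_offload() publishes these callbacks in the
 * inet_offloads[] slot for IPPROTO_TCP, and tcpv4_offload_init() runs
 * during IPv4 bring-up from inet_init().  A hypothetical protocol would
 * hook in the same way (IPPROTO_FOO and the foo_* callbacks are
 * illustrative names, not real kernel symbols):
 *
 *	static const struct net_offload foo_offload = {
 *		.callbacks = {
 *			.gso_segment  = foo_gso_segment,
 *			.gro_receive  = foo_gro_receive,
 *			.gro_complete = foo_gro_complete,
 *		},
 *	};
 *
 *	// from the protocol's init path:
 *	// return inet_add_offload(&foo_offload, IPPROTO_FOO);
 */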